os_aix.cpp revision 9988:3a7618a9f2d6
1/* 2 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. 3 * Copyright 2012, 2015 SAP AG. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
23 * 24 */ 25 26// According to the AIX OS doc #pragma alloca must be used 27// with C++ compiler before referencing the function alloca() 28#pragma alloca 29 30// no precompiled headers 31#include "classfile/classLoader.hpp" 32#include "classfile/systemDictionary.hpp" 33#include "classfile/vmSymbols.hpp" 34#include "code/icBuffer.hpp" 35#include "code/vtableStubs.hpp" 36#include "compiler/compileBroker.hpp" 37#include "interpreter/interpreter.hpp" 38#include "jvm_aix.h" 39#include "libo4.hpp" 40#include "libperfstat_aix.hpp" 41#include "libodm_aix.hpp" 42#include "loadlib_aix.hpp" 43#include "memory/allocation.inline.hpp" 44#include "memory/filemap.hpp" 45#include "misc_aix.hpp" 46#include "mutex_aix.inline.hpp" 47#include "oops/oop.inline.hpp" 48#include "os_aix.inline.hpp" 49#include "os_share_aix.hpp" 50#include "porting_aix.hpp" 51#include "prims/jniFastGetField.hpp" 52#include "prims/jvm.h" 53#include "prims/jvm_misc.hpp" 54#include "runtime/arguments.hpp" 55#include "runtime/atomic.inline.hpp" 56#include "runtime/extendedPC.hpp" 57#include "runtime/globals.hpp" 58#include "runtime/interfaceSupport.hpp" 59#include "runtime/java.hpp" 60#include "runtime/javaCalls.hpp" 61#include "runtime/mutexLocker.hpp" 62#include "runtime/objectMonitor.hpp" 63#include "runtime/orderAccess.inline.hpp" 64#include "runtime/os.hpp" 65#include "runtime/osThread.hpp" 66#include "runtime/perfMemory.hpp" 67#include "runtime/sharedRuntime.hpp" 68#include "runtime/statSampler.hpp" 69#include "runtime/stubRoutines.hpp" 70#include "runtime/thread.inline.hpp" 71#include "runtime/threadCritical.hpp" 72#include "runtime/timer.hpp" 73#include "runtime/vm_version.hpp" 74#include "services/attachListener.hpp" 75#include "services/runtimeService.hpp" 76#include "utilities/decoder.hpp" 77#include "utilities/defaultStream.hpp" 78#include "utilities/events.hpp" 79#include "utilities/growableArray.hpp" 80#include "utilities/vmError.hpp" 81 82// put OS-includes here (sorted alphabetically) 
83#include <errno.h> 84#include <fcntl.h> 85#include <inttypes.h> 86#include <poll.h> 87#include <procinfo.h> 88#include <pthread.h> 89#include <pwd.h> 90#include <semaphore.h> 91#include <signal.h> 92#include <stdint.h> 93#include <stdio.h> 94#include <string.h> 95#include <unistd.h> 96#include <sys/ioctl.h> 97#include <sys/ipc.h> 98#include <sys/mman.h> 99#include <sys/resource.h> 100#include <sys/select.h> 101#include <sys/shm.h> 102#include <sys/socket.h> 103#include <sys/stat.h> 104#include <sys/sysinfo.h> 105#include <sys/systemcfg.h> 106#include <sys/time.h> 107#include <sys/times.h> 108#include <sys/types.h> 109#include <sys/utsname.h> 110#include <sys/vminfo.h> 111#include <sys/wait.h> 112 113// Missing prototypes for various system APIs. 114extern "C" 115int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t); 116 117#if !defined(_AIXVERSION_610) 118extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int); 119extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int); 120extern "C" int getargs (procsinfo*, int, char*, int); 121#endif 122 123#define MAX_PATH (2 * K) 124 125// for timer info max values which include all bits 126#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 127// for multipage initialization error analysis (in 'g_multipage_error') 128#define ERROR_MP_OS_TOO_OLD 100 129#define ERROR_MP_EXTSHM_ACTIVE 101 130#define ERROR_MP_VMGETINFO_FAILED 102 131#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103 132 133// Query dimensions of the stack of the calling thread. 
// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// Resolve an AIX function descriptor to the code entry point it points to.
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
int os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t os::Aix::_os_version = 0;

int os::Aix::_stack_page_size = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong initial_time_count = 0;
static int clock_tics_per_sec = 100;
static sigset_t check_signal_done; // For diagnostics to print a message once (see run_periodic_checks)
static bool check_signals = true;
static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" resp. 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
// http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
static struct {
  size_t pagesize;             // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;            // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;             // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize;  // stack page size of pthread threads
  size_t textpsize;            // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;      // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;      // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                   // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
228static bool is_close_to_brk(address a) { 229 assert0(g_brk_at_startup != NULL); 230 if (a >= g_brk_at_startup && 231 a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) { 232 return true; 233 } 234 return false; 235} 236 237julong os::available_memory() { 238 return Aix::available_memory(); 239} 240 241julong os::Aix::available_memory() { 242 // Avoid expensive API call here, as returned value will always be null. 243 if (os::Aix::on_pase()) { 244 return 0x0LL; 245 } 246 os::Aix::meminfo_t mi; 247 if (os::Aix::get_meminfo(&mi)) { 248 return mi.real_free; 249 } else { 250 return ULONG_MAX; 251 } 252} 253 254julong os::physical_memory() { 255 return Aix::physical_memory(); 256} 257 258// Return true if user is running as root. 259 260bool os::have_special_privileges() { 261 static bool init = false; 262 static bool privileges = false; 263 if (!init) { 264 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 265 init = true; 266 } 267 return privileges; 268} 269 270// Helper function, emulates disclaim64 using multiple 32bit disclaims 271// because we cannot use disclaim64() on AS/400 and old AIX releases. 272static bool my_disclaim64(char* addr, size_t size) { 273 274 if (size == 0) { 275 return true; 276 } 277 278 // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.) 
279 const unsigned int maxDisclaimSize = 0x40000000; 280 281 const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize); 282 const unsigned int lastDisclaimSize = (size % maxDisclaimSize); 283 284 char* p = addr; 285 286 for (int i = 0; i < numFullDisclaimsNeeded; i ++) { 287 if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) { 288 trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno); 289 return false; 290 } 291 p += maxDisclaimSize; 292 } 293 294 if (lastDisclaimSize > 0) { 295 if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) { 296 trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno); 297 return false; 298 } 299 } 300 301 return true; 302} 303 304// Cpu architecture string 305#if defined(PPC32) 306static char cpu_arch[] = "ppc"; 307#elif defined(PPC64) 308static char cpu_arch[] = "ppc64"; 309#else 310#error Add appropriate cpu_arch setting 311#endif 312 313// Wrap the function "vmgetinfo" which is not available on older OS releases. 314static int checked_vmgetinfo(void *out, int command, int arg) { 315 if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) { 316 guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1"); 317 } 318 return ::vmgetinfo(out, command, arg); 319} 320 321// Given an address, returns the size of the page backing that address. 322size_t os::Aix::query_pagesize(void* addr) { 323 324 if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) { 325 // AS/400 older than V6R1: no vmgetinfo here, default to 4K 326 return SIZE_4K; 327 } 328 329 vm_page_info pi; 330 pi.addr = (uint64_t)addr; 331 if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) { 332 return pi.pagesize; 333 } else { 334 assert(false, "vmgetinfo failed to retrieve page size"); 335 return SIZE_4K; 336 } 337} 338 339void os::Aix::initialize_system_info() { 340 341 // Get the number of online(logical) cpus instead of configured. 
342 os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN); 343 assert(_processor_count > 0, "_processor_count must be > 0"); 344 345 // Retrieve total physical storage. 346 os::Aix::meminfo_t mi; 347 if (!os::Aix::get_meminfo(&mi)) { 348 assert(false, "os::Aix::get_meminfo failed."); 349 } 350 _physical_memory = (julong) mi.real_total; 351} 352 353// Helper function for tracing page sizes. 354static const char* describe_pagesize(size_t pagesize) { 355 switch (pagesize) { 356 case SIZE_4K : return "4K"; 357 case SIZE_64K: return "64K"; 358 case SIZE_16M: return "16M"; 359 case SIZE_16G: return "16G"; 360 default: 361 assert(false, "surprise"); 362 return "??"; 363 } 364} 365 366// Probe OS for multipage support. 367// Will fill the global g_multipage_support structure. 368// Must be called before calling os::large_page_init(). 369static void query_multipage_support() { 370 371 guarantee(g_multipage_support.pagesize == -1, 372 "do not call twice"); 373 374 g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE); 375 376 // This really would surprise me. 377 assert(g_multipage_support.pagesize == SIZE_4K, "surprise!"); 378 379 // Query default data page size (default page size for C-Heap, pthread stacks and .bss). 380 // Default data page size is defined either by linker options (-bdatapsize) 381 // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given, 382 // default should be 4K. 383 { 384 void* p = ::malloc(SIZE_16M); 385 g_multipage_support.datapsize = os::Aix::query_pagesize(p); 386 ::free(p); 387 } 388 389 // Query default shm page size (LDR_CNTRL SHMPSIZE). 390 // Note that this is pure curiosity. We do not rely on default page size but set 391 // our own page size after allocated. 
392 { 393 const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR); 394 guarantee(shmid != -1, "shmget failed"); 395 void* p = ::shmat(shmid, NULL, 0); 396 ::shmctl(shmid, IPC_RMID, NULL); 397 guarantee(p != (void*) -1, "shmat failed"); 398 g_multipage_support.shmpsize = os::Aix::query_pagesize(p); 399 ::shmdt(p); 400 } 401 402 // Before querying the stack page size, make sure we are not running as primordial 403 // thread (because primordial thread's stack may have different page size than 404 // pthread thread stacks). Running a VM on the primordial thread won't work for a 405 // number of reasons so we may just as well guarantee it here. 406 guarantee0(!os::Aix::is_primordial_thread()); 407 408 // Query pthread stack page size. Should be the same as data page size because 409 // pthread stacks are allocated from C-Heap. 410 { 411 int dummy = 0; 412 g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy); 413 } 414 415 // Query default text page size (LDR_CNTRL TEXTPSIZE). 416 { 417 address any_function = 418 resolve_function_descriptor_to_code_pointer((address)describe_pagesize); 419 g_multipage_support.textpsize = os::Aix::query_pagesize(any_function); 420 } 421 422 // Now probe for support of 64K pages and 16M pages. 423 424 // Before OS/400 V6R1, there is no support for pages other than 4K. 425 if (os::Aix::on_pase_V5R4_or_older()) { 426 trcVerbose("OS/400 < V6R1 - no large page support."); 427 g_multipage_support.error = ERROR_MP_OS_TOO_OLD; 428 goto query_multipage_support_end; 429 } 430 431 // Now check which page sizes the OS claims it supports, and of those, which actually can be used. 
432 { 433 const int MAX_PAGE_SIZES = 4; 434 psize_t sizes[MAX_PAGE_SIZES]; 435 const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES); 436 if (num_psizes == -1) { 437 trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno); 438 trcVerbose("disabling multipage support."); 439 g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED; 440 goto query_multipage_support_end; 441 } 442 guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed."); 443 assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?"); 444 trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes); 445 for (int i = 0; i < num_psizes; i ++) { 446 trcVerbose(" %s ", describe_pagesize(sizes[i])); 447 } 448 449 // Can we use 64K, 16M pages? 450 for (int i = 0; i < num_psizes; i ++) { 451 const size_t pagesize = sizes[i]; 452 if (pagesize != SIZE_64K && pagesize != SIZE_16M) { 453 continue; 454 } 455 bool can_use = false; 456 trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize)); 457 const int shmid = ::shmget(IPC_PRIVATE, pagesize, 458 IPC_CREAT | S_IRUSR | S_IWUSR); 459 guarantee0(shmid != -1); // Should always work. 460 // Try to set pagesize. 461 struct shmid_ds shm_buf = { 0 }; 462 shm_buf.shm_pagesize = pagesize; 463 if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) { 464 const int en = errno; 465 ::shmctl(shmid, IPC_RMID, NULL); // As early as possible! 466 trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n", 467 errno); 468 } else { 469 // Attach and double check pageisze. 470 void* p = ::shmat(shmid, NULL, 0); 471 ::shmctl(shmid, IPC_RMID, NULL); // As early as possible! 472 guarantee0(p != (void*) -1); // Should always work. 
473 const size_t real_pagesize = os::Aix::query_pagesize(p); 474 if (real_pagesize != pagesize) { 475 trcVerbose("real page size (0x%llX) differs.", real_pagesize); 476 } else { 477 can_use = true; 478 } 479 ::shmdt(p); 480 } 481 trcVerbose("Can use: %s", (can_use ? "yes" : "no")); 482 if (pagesize == SIZE_64K) { 483 g_multipage_support.can_use_64K_pages = can_use; 484 } else if (pagesize == SIZE_16M) { 485 g_multipage_support.can_use_16M_pages = can_use; 486 } 487 } 488 489 } // end: check which pages can be used for shared memory 490 491query_multipage_support_end: 492 493 trcVerbose("base page size (sysconf _SC_PAGESIZE): %s", 494 describe_pagesize(g_multipage_support.pagesize)); 495 trcVerbose("Data page size (C-Heap, bss, etc): %s", 496 describe_pagesize(g_multipage_support.datapsize)); 497 trcVerbose("Text page size: %s", 498 describe_pagesize(g_multipage_support.textpsize)); 499 trcVerbose("Thread stack page size (pthread): %s", 500 describe_pagesize(g_multipage_support.pthr_stack_pagesize)); 501 trcVerbose("Default shared memory page size: %s", 502 describe_pagesize(g_multipage_support.shmpsize)); 503 trcVerbose("Can use 64K pages dynamically with shared meory: %s", 504 (g_multipage_support.can_use_64K_pages ? "yes" :"no")); 505 trcVerbose("Can use 16M pages dynamically with shared memory: %s", 506 (g_multipage_support.can_use_16M_pages ? 
"yes" :"no")); 507 trcVerbose("Multipage error details: %d", 508 g_multipage_support.error); 509 510 // sanity checks 511 assert0(g_multipage_support.pagesize == SIZE_4K); 512 assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K); 513 assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K); 514 assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize); 515 assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K); 516 517} 518 519void os::init_system_properties_values() { 520 521#define DEFAULT_LIBPATH "/lib:/usr/lib" 522#define EXTENSIONS_DIR "/lib/ext" 523 524 // Buffer that fits several sprintfs. 525 // Note that the space for the trailing null is provided 526 // by the nulls included by the sizeof operator. 527 const size_t bufsize = 528 MAX2((size_t)MAXPATHLEN, // For dll_dir & friends. 529 (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir 530 char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); 531 532 // sysclasspath, java_home, dll_dir 533 { 534 char *pslash; 535 os::jvm_path(buf, bufsize); 536 537 // Found the full path to libjvm.so. 538 // Now cut the path to <java_home>/jre if we can. 539 pslash = strrchr(buf, '/'); 540 if (pslash != NULL) { 541 *pslash = '\0'; // Get rid of /libjvm.so. 542 } 543 pslash = strrchr(buf, '/'); 544 if (pslash != NULL) { 545 *pslash = '\0'; // Get rid of /{client|server|hotspot}. 546 } 547 Arguments::set_dll_dir(buf); 548 549 if (pslash != NULL) { 550 pslash = strrchr(buf, '/'); 551 if (pslash != NULL) { 552 *pslash = '\0'; // Get rid of /<arch>. 553 pslash = strrchr(buf, '/'); 554 if (pslash != NULL) { 555 *pslash = '\0'; // Get rid of /lib. 556 } 557 } 558 } 559 Arguments::set_java_home(buf); 560 set_boot_path('/', ':'); 561 } 562 563 // Where to look for native libraries. 564 565 // On Aix we get the user setting of LIBPATH. 
566 // Eventually, all the library path setting will be done here. 567 // Get the user setting of LIBPATH. 568 const char *v = ::getenv("LIBPATH"); 569 const char *v_colon = ":"; 570 if (v == NULL) { v = ""; v_colon = ""; } 571 572 // Concatenate user and invariant part of ld_library_path. 573 // That's +1 for the colon and +1 for the trailing '\0'. 574 char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal); 575 sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon); 576 Arguments::set_library_path(ld_library_path); 577 FREE_C_HEAP_ARRAY(char, ld_library_path); 578 579 // Extensions directories. 580 sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home()); 581 Arguments::set_ext_dirs(buf); 582 583 FREE_C_HEAP_ARRAY(char, buf); 584 585#undef DEFAULT_LIBPATH 586#undef EXTENSIONS_DIR 587} 588 589//////////////////////////////////////////////////////////////////////////////// 590// breakpoint support 591 592void os::breakpoint() { 593 BREAKPOINT; 594} 595 596extern "C" void breakpoint() { 597 // use debugger to set breakpoint here 598} 599 600//////////////////////////////////////////////////////////////////////////////// 601// signal support 602 603debug_only(static bool signal_sets_initialized = false); 604static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 605 606bool os::Aix::is_sig_ignored(int sig) { 607 struct sigaction oact; 608 sigaction(sig, (struct sigaction*)NULL, &oact); 609 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 610 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 611 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) { 612 return true; 613 } else { 614 return false; 615 } 616} 617 618void os::Aix::signal_sets_init() { 619 // Should also have an assertion stating we are still single-threaded. 620 assert(!signal_sets_initialized, "Already initialized"); 621 // Fill in signals that are necessarily unblocked for all threads in 622 // the VM. 
Currently, we unblock the following signals: 623 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 624 // by -Xrs (=ReduceSignalUsage)); 625 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 626 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 627 // the dispositions or masks wrt these signals. 628 // Programs embedding the VM that want to use the above signals for their 629 // own purposes must, at this time, use the "-Xrs" option to prevent 630 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 631 // (See bug 4345157, and other related bugs). 632 // In reality, though, unblocking these signals is really a nop, since 633 // these signals are not blocked by default. 634 sigemptyset(&unblocked_sigs); 635 sigemptyset(&allowdebug_blocked_sigs); 636 sigaddset(&unblocked_sigs, SIGILL); 637 sigaddset(&unblocked_sigs, SIGSEGV); 638 sigaddset(&unblocked_sigs, SIGBUS); 639 sigaddset(&unblocked_sigs, SIGFPE); 640 sigaddset(&unblocked_sigs, SIGTRAP); 641 sigaddset(&unblocked_sigs, SIGDANGER); 642 sigaddset(&unblocked_sigs, SR_signum); 643 644 if (!ReduceSignalUsage) { 645 if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 646 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 647 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 648 } 649 if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 650 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 651 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 652 } 653 if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 654 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 655 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 656 } 657 } 658 // Fill in signals that are blocked by all but the VM thread. 659 sigemptyset(&vm_sigs); 660 if (!ReduceSignalUsage) 661 sigaddset(&vm_sigs, BREAK_SIGNAL); 662 debug_only(signal_sets_initialized = true); 663} 664 665// These are signals that are unblocked while a thread is running Java. 
666// (For some reason, they get blocked by default.) 667sigset_t* os::Aix::unblocked_signals() { 668 assert(signal_sets_initialized, "Not initialized"); 669 return &unblocked_sigs; 670} 671 672// These are the signals that are blocked while a (non-VM) thread is 673// running Java. Only the VM thread handles these signals. 674sigset_t* os::Aix::vm_signals() { 675 assert(signal_sets_initialized, "Not initialized"); 676 return &vm_sigs; 677} 678 679// These are signals that are blocked during cond_wait to allow debugger in 680sigset_t* os::Aix::allowdebug_blocked_signals() { 681 assert(signal_sets_initialized, "Not initialized"); 682 return &allowdebug_blocked_sigs; 683} 684 685void os::Aix::hotspot_sigmask(Thread* thread) { 686 687 //Save caller's signal mask before setting VM signal mask 688 sigset_t caller_sigmask; 689 pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask); 690 691 OSThread* osthread = thread->osthread(); 692 osthread->set_caller_sigmask(caller_sigmask); 693 694 pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL); 695 696 if (!ReduceSignalUsage) { 697 if (thread->is_VM_thread()) { 698 // Only the VM thread handles BREAK_SIGNAL ... 699 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL); 700 } else { 701 // ... all other threads block BREAK_SIGNAL 702 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL); 703 } 704 } 705} 706 707// retrieve memory information. 708// Returns false if something went wrong; 709// content of pmi undefined in this case. 710bool os::Aix::get_meminfo(meminfo_t* pmi) { 711 712 assert(pmi, "get_meminfo: invalid parameter"); 713 714 memset(pmi, 0, sizeof(meminfo_t)); 715 716 if (os::Aix::on_pase()) { 717 // On PASE, use the libo4 porting library. 
718 719 unsigned long long virt_total = 0; 720 unsigned long long real_total = 0; 721 unsigned long long real_free = 0; 722 unsigned long long pgsp_total = 0; 723 unsigned long long pgsp_free = 0; 724 if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) { 725 pmi->virt_total = virt_total; 726 pmi->real_total = real_total; 727 pmi->real_free = real_free; 728 pmi->pgsp_total = pgsp_total; 729 pmi->pgsp_free = pgsp_free; 730 return true; 731 } 732 return false; 733 734 } else { 735 736 // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics 737 // See: 738 // http://publib.boulder.ibm.com/infocenter/systems/index.jsp 739 // ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm 740 // http://publib.boulder.ibm.com/infocenter/systems/index.jsp 741 // ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm 742 743 perfstat_memory_total_t psmt; 744 memset (&psmt, '\0', sizeof(psmt)); 745 const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1); 746 if (rc == -1) { 747 trcVerbose("perfstat_memory_total() failed (errno=%d)", errno); 748 assert(0, "perfstat_memory_total() failed"); 749 return false; 750 } 751 752 assert(rc == 1, "perfstat_memory_total() - weird return code"); 753 754 // excerpt from 755 // http://publib.boulder.ibm.com/infocenter/systems/index.jsp 756 // ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm 757 // The fields of perfstat_memory_total_t: 758 // u_longlong_t virt_total Total virtual memory (in 4 KB pages). 759 // u_longlong_t real_total Total real memory (in 4 KB pages). 760 // u_longlong_t real_free Free real memory (in 4 KB pages). 761 // u_longlong_t pgsp_total Total paging space (in 4 KB pages). 762 // u_longlong_t pgsp_free Free paging space (in 4 KB pages). 
763 764 pmi->virt_total = psmt.virt_total * 4096; 765 pmi->real_total = psmt.real_total * 4096; 766 pmi->real_free = psmt.real_free * 4096; 767 pmi->pgsp_total = psmt.pgsp_total * 4096; 768 pmi->pgsp_free = psmt.pgsp_free * 4096; 769 770 return true; 771 772 } 773} // end os::Aix::get_meminfo 774 775////////////////////////////////////////////////////////////////////////////// 776// create new thread 777 778// Thread start routine for all newly created threads 779static void *java_start(Thread *thread) { 780 781 // find out my own stack dimensions 782 { 783 // actually, this should do exactly the same as thread->record_stack_base_and_size... 784 address base = 0; 785 size_t size = 0; 786 query_stack_dimensions(&base, &size); 787 thread->set_stack_base(base); 788 thread->set_stack_size(size); 789 } 790 791 const pthread_t pthread_id = ::pthread_self(); 792 const tid_t kernel_thread_id = ::thread_self(); 793 794 trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT 795 ", stack %p ... %p, stacksize 0x%IX (%IB)", 796 pthread_id, kernel_thread_id, 797 thread->stack_end(), 798 thread->stack_base(), 799 thread->stack_size(), 800 thread->stack_size()); 801 802 // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc() 803 // by the pthread library). In rare cases, this may not be the case, e.g. when third-party 804 // tools hook pthread_create(). In this case, we may run into problems establishing 805 // guard pages on those stacks, because the stacks may reside in memory which is not 806 // protectable (shmated). 807 if (thread->stack_base() > ::sbrk(0)) { 808 trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id); 809 } 810 811 // Try to randomize the cache line index of hot stack frames. 812 // This helps when threads of the same stack traces evict each other's 813 // cache lines. The threads can be either from the same JVM instance, or 814 // from different JVM instances. 
The benefit is especially true for 815 // processors with hyperthreading technology. 816 817 static int counter = 0; 818 int pid = os::current_process_id(); 819 alloca(((pid ^ counter++) & 7) * 128); 820 821 thread->initialize_thread_current(); 822 823 OSThread* osthread = thread->osthread(); 824 825 // Thread_id is pthread id. 826 osthread->set_thread_id(pthread_id); 827 828 // .. but keep kernel thread id too for diagnostics 829 osthread->set_kernel_thread_id(kernel_thread_id); 830 831 // Initialize signal mask for this thread. 832 os::Aix::hotspot_sigmask(thread); 833 834 // Initialize floating point control register. 835 os::Aix::init_thread_fpu_state(); 836 837 assert(osthread->get_state() == RUNNABLE, "invalid os thread state"); 838 839 // Call one more level start routine. 840 thread->run(); 841 842 trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".", 843 pthread_id, kernel_thread_id); 844 845 return 0; 846} 847 848bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 849 850 assert(thread->osthread() == NULL, "caller responsible"); 851 852 // Allocate the OSThread object 853 OSThread* osthread = new OSThread(NULL, NULL); 854 if (osthread == NULL) { 855 return false; 856 } 857 858 // set the correct thread state 859 osthread->set_thread_type(thr_type); 860 861 // Initial state is ALLOCATED but not INITIALIZED 862 osthread->set_state(ALLOCATED); 863 864 thread->set_osthread(osthread); 865 866 // init thread attributes 867 pthread_attr_t attr; 868 pthread_attr_init(&attr); 869 guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???"); 870 871 // Make sure we run in 1:1 kernel-user-thread mode. 872 if (os::Aix::on_aix()) { 873 guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???"); 874 guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???"); 875 } // end: aix 876 877 // Start in suspended state, and in os::thread_start, wake the thread up. 
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (stack_size == 0) {
    stack_size = os::Aix::default_stack_size(thr_type);

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
      stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Never go below the platform minimum stack size.
  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
  pthread_attr_setstacksize(&attr, stack_size);

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  // Attributes are no longer needed once pthread_create has been called.
  pthread_attr_destroy(&attr);

  if (ret == 0) {
    trcVerbose("Created New Thread : pthread-id %u", tid);
  } else {
    if (os::Aix::on_pase()) {
      // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
      // using QSH. Otherwise pthread_create fails with errno=11.
      trcVerbose("(Please make sure you set the environment variable "
                 "QIBM_MULTI_THREADED=Y before running this program.)");
    }
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

// Wrap an already-running native thread (e.g. the primordial thread or one
// entering the VM via JNI) in an OSThread and register it with the VM.
// Returns false if the OSThread cannot be allocated.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
    pthread_id, kernel_thread_id,
    thread->stack_end(),
    thread->stack_base(),
    thread->stack_size(),
    thread->stack_size());

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

// Resume a thread that was created suspended by os::create_thread().
void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
1019double os::elapsedTime() { 1020 return (double)(os::elapsed_counter()) * 0.000001; 1021} 1022 1023jlong os::elapsed_counter() { 1024 timeval time; 1025 int status = gettimeofday(&time, NULL); 1026 return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count; 1027} 1028 1029jlong os::elapsed_frequency() { 1030 return (1000 * 1000); 1031} 1032 1033bool os::supports_vtime() { return true; } 1034bool os::enable_vtime() { return false; } 1035bool os::vtime_enabled() { return false; } 1036 1037double os::elapsedVTime() { 1038 struct rusage usage; 1039 int retval = getrusage(RUSAGE_THREAD, &usage); 1040 if (retval == 0) { 1041 return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000); 1042 } else { 1043 // better than nothing, but not much 1044 return elapsedTime(); 1045 } 1046} 1047 1048jlong os::javaTimeMillis() { 1049 timeval time; 1050 int status = gettimeofday(&time, NULL); 1051 assert(status != -1, "aix error at gettimeofday()"); 1052 return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000); 1053} 1054 1055void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 1056 timeval time; 1057 int status = gettimeofday(&time, NULL); 1058 assert(status != -1, "aix error at gettimeofday()"); 1059 seconds = jlong(time.tv_sec); 1060 nanos = jlong(time.tv_usec) * 1000; 1061} 1062 1063jlong os::javaTimeNanos() { 1064 if (os::Aix::on_pase()) { 1065 1066 timeval time; 1067 int status = gettimeofday(&time, NULL); 1068 assert(status != -1, "PASE error at gettimeofday()"); 1069 jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec); 1070 return 1000 * usecs; 1071 1072 } else { 1073 // On AIX use the precision of processors real time clock 1074 // or time base registers. 1075 timebasestruct_t time; 1076 int rc; 1077 1078 // If the CPU has a time register, it will be used and 1079 // we have to convert to real time first. 
After convertion we have following data: 1080 // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970] 1081 // time.tb_low [nanoseconds after the last full second above] 1082 // We better use mread_real_time here instead of read_real_time 1083 // to ensure that we will get a monotonic increasing time. 1084 if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) { 1085 rc = time_base_to_time(&time, TIMEBASE_SZ); 1086 assert(rc != -1, "aix error at time_base_to_time()"); 1087 } 1088 return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low); 1089 } 1090} 1091 1092void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 1093 info_ptr->max_value = ALL_64_BITS; 1094 // mread_real_time() is monotonic (see 'os::javaTimeNanos()') 1095 info_ptr->may_skip_backward = false; 1096 info_ptr->may_skip_forward = false; 1097 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 1098} 1099 1100// Return the real, user, and system times in seconds from an 1101// arbitrary fixed point in the past. 
1102bool os::getTimesSecs(double* process_real_time, 1103 double* process_user_time, 1104 double* process_system_time) { 1105 struct tms ticks; 1106 clock_t real_ticks = times(&ticks); 1107 1108 if (real_ticks == (clock_t) (-1)) { 1109 return false; 1110 } else { 1111 double ticks_per_second = (double) clock_tics_per_sec; 1112 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; 1113 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; 1114 *process_real_time = ((double) real_ticks) / ticks_per_second; 1115 1116 return true; 1117 } 1118} 1119 1120char * os::local_time_string(char *buf, size_t buflen) { 1121 struct tm t; 1122 time_t long_time; 1123 time(&long_time); 1124 localtime_r(&long_time, &t); 1125 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 1126 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, 1127 t.tm_hour, t.tm_min, t.tm_sec); 1128 return buf; 1129} 1130 1131struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 1132 return localtime_r(clock, res); 1133} 1134 1135//////////////////////////////////////////////////////////////////////////////// 1136// runtime exit support 1137 1138// Note: os::shutdown() might be called very early during initialization, or 1139// called from signal handler. Before adding something to os::shutdown(), make 1140// sure it is async-safe and can handle partially initialized VM. 1141void os::shutdown() { 1142 1143 // allow PerfMemory to attempt cleanup of any persistent resources 1144 perfMemory_exit(); 1145 1146 // needs to remove object in file system 1147 AttachListener::abort(); 1148 1149 // flush buffered output, finish log files 1150 ostream_abort(); 1151 1152 // Check for abort hook 1153 abort_hook_t abort_hook = Arguments::abort_hook(); 1154 if (abort_hook != NULL) { 1155 abort_hook(); 1156 } 1157} 1158 1159// Note: os::abort() might be called very early during initialization, or 1160// called from signal handler. 
Before adding something to os::abort(), make 1161// sure it is async-safe and can handle partially initialized VM. 1162void os::abort(bool dump_core, void* siginfo, const void* context) { 1163 os::shutdown(); 1164 if (dump_core) { 1165#ifndef PRODUCT 1166 fdStream out(defaultStream::output_fd()); 1167 out.print_raw("Current thread is "); 1168 char buf[16]; 1169 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1170 out.print_raw_cr(buf); 1171 out.print_raw_cr("Dumping core ..."); 1172#endif 1173 ::abort(); // dump core 1174 } 1175 1176 ::exit(1); 1177} 1178 1179// Die immediately, no exit hook, no abort hook, no cleanup. 1180void os::die() { 1181 ::abort(); 1182} 1183 1184// This method is a copy of JDK's sysGetLastErrorString 1185// from src/solaris/hpi/src/system_md.c 1186 1187size_t os::lasterror(char *buf, size_t len) { 1188 if (errno == 0) return 0; 1189 1190 const char *s = ::strerror(errno); 1191 size_t n = ::strlen(s); 1192 if (n >= len) { 1193 n = len - 1; 1194 } 1195 ::strncpy(buf, s, n); 1196 buf[n] = '\0'; 1197 return n; 1198} 1199 1200intx os::current_thread_id() { 1201 return (intx)pthread_self(); 1202} 1203 1204int os::current_process_id() { 1205 return getpid(); 1206} 1207 1208// DLL functions 1209 1210const char* os::dll_file_extension() { return ".so"; } 1211 1212// This must be hard coded because it's the system's temporary 1213// directory not the java application's temp directory, ala java.io.tmpdir. 1214const char* os::get_temp_directory() { return "/tmp"; } 1215 1216static bool file_exists(const char* filename) { 1217 struct stat statbuf; 1218 if (filename == NULL || strlen(filename) == 0) { 1219 return false; 1220 } 1221 return os::stat(filename, &statbuf) == 0; 1222} 1223 1224bool os::dll_build_name(char* buffer, size_t buflen, 1225 const char* pname, const char* fname) { 1226 bool retval = false; 1227 // Copied from libhpi 1228 const size_t pnamelen = pname ? 
strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    // No path component given: plain "lib<fname>.so".
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element and keep the first
    // "<dir>/lib<fname>.so" that exists on disk.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage (split_path allocates each element plus the array)
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory: no existence check is done in this case.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.
  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else {
    return false;
  }

}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
// it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
// code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
// NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor.
      return code_entry;
    }
  }

  return NULL;
}

// Resolve addr (pc or function descriptor) to a symbol name in buf and an
// offset from the symbol start. Returns false if addr cannot be resolved.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
}

// Resolve addr (pc or function descriptor) to the name of the containing
// loaded module. Returns false if addr cannot be resolved.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  return AixSymbols::get_module_name(addr, buf, buflen);
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
1351void *os::dll_load(const char *filename, char *ebuf, int ebuflen) { 1352 1353 if (ebuf && ebuflen > 0) { 1354 ebuf[0] = '\0'; 1355 ebuf[ebuflen - 1] = '\0'; 1356 } 1357 1358 if (!filename || strlen(filename) == 0) { 1359 ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1); 1360 return NULL; 1361 } 1362 1363 // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants. 1364 void * result= ::dlopen(filename, RTLD_LAZY); 1365 if (result != NULL) { 1366 // Reload dll cache. Don't do this in signal handling. 1367 LoadedLibraries::reload(); 1368 return result; 1369 } else { 1370 // error analysis when dlopen fails 1371 const char* const error_report = ::dlerror(); 1372 if (error_report && ebuf && ebuflen > 0) { 1373 snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s", 1374 filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report); 1375 } 1376 } 1377 return NULL; 1378} 1379 1380void* os::dll_lookup(void* handle, const char* name) { 1381 void* res = dlsym(handle, name); 1382 return res; 1383} 1384 1385void* os::get_default_process_handle() { 1386 return (void*)::dlopen(NULL, RTLD_LAZY); 1387} 1388 1389void os::print_dll_info(outputStream *st) { 1390 st->print_cr("Dynamic libraries:"); 1391 LoadedLibraries::print(st); 1392} 1393 1394void os::get_summary_os_info(char* buf, size_t buflen) { 1395 // There might be something more readable than uname results for AIX. 
1396 struct utsname name; 1397 uname(&name); 1398 snprintf(buf, buflen, "%s %s", name.release, name.version); 1399} 1400 1401void os::print_os_info(outputStream* st) { 1402 st->print("OS:"); 1403 1404 st->print("uname:"); 1405 struct utsname name; 1406 uname(&name); 1407 st->print(name.sysname); st->print(" "); 1408 st->print(name.nodename); st->print(" "); 1409 st->print(name.release); st->print(" "); 1410 st->print(name.version); st->print(" "); 1411 st->print(name.machine); 1412 st->cr(); 1413 1414 uint32_t ver = os::Aix::os_version(); 1415 st->print_cr("AIX kernel version %u.%u.%u.%u", 1416 (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF); 1417 1418 // rlimit 1419 st->print("rlimit:"); 1420 struct rlimit rlim; 1421 1422 st->print(" STACK "); 1423 getrlimit(RLIMIT_STACK, &rlim); 1424 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1425 else st->print("%uk", rlim.rlim_cur >> 10); 1426 1427 st->print(", CORE "); 1428 getrlimit(RLIMIT_CORE, &rlim); 1429 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1430 else st->print("%uk", rlim.rlim_cur >> 10); 1431 1432 st->print(", NPROC "); 1433 st->print("%d", sysconf(_SC_CHILD_MAX)); 1434 1435 st->print(", NOFILE "); 1436 getrlimit(RLIMIT_NOFILE, &rlim); 1437 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1438 else st->print("%d", rlim.rlim_cur); 1439 1440 st->print(", AS "); 1441 getrlimit(RLIMIT_AS, &rlim); 1442 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1443 else st->print("%uk", rlim.rlim_cur >> 10); 1444 1445 // Print limits on DATA, because it limits the C-heap. 
1446 st->print(", DATA "); 1447 getrlimit(RLIMIT_DATA, &rlim); 1448 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1449 else st->print("%uk", rlim.rlim_cur >> 10); 1450 st->cr(); 1451 1452 // load average 1453 st->print("load average:"); 1454 double loadavg[3] = {-1.L, -1.L, -1.L}; 1455 os::loadavg(loadavg, 3); 1456 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); 1457 st->cr(); 1458 1459 // print wpar info 1460 libperfstat::wparinfo_t wi; 1461 if (libperfstat::get_wparinfo(&wi)) { 1462 st->print_cr("wpar info"); 1463 st->print_cr("name: %s", wi.name); 1464 st->print_cr("id: %d", wi.wpar_id); 1465 st->print_cr("type: %s", (wi.app_wpar ? "application" : "system")); 1466 } 1467 1468 // print partition info 1469 libperfstat::partitioninfo_t pi; 1470 if (libperfstat::get_partitioninfo(&pi)) { 1471 st->print_cr("partition info"); 1472 st->print_cr(" name: %s", pi.name); 1473 } 1474 1475} 1476 1477void os::print_memory_info(outputStream* st) { 1478 1479 st->print_cr("Memory:"); 1480 1481 st->print_cr(" Base page size (sysconf _SC_PAGESIZE): %s", 1482 describe_pagesize(g_multipage_support.pagesize)); 1483 st->print_cr(" Data page size (C-Heap, bss, etc): %s", 1484 describe_pagesize(g_multipage_support.datapsize)); 1485 st->print_cr(" Text page size: %s", 1486 describe_pagesize(g_multipage_support.textpsize)); 1487 st->print_cr(" Thread stack page size (pthread): %s", 1488 describe_pagesize(g_multipage_support.pthr_stack_pagesize)); 1489 st->print_cr(" Default shared memory page size: %s", 1490 describe_pagesize(g_multipage_support.shmpsize)); 1491 st->print_cr(" Can use 64K pages dynamically with shared meory: %s", 1492 (g_multipage_support.can_use_64K_pages ? "yes" :"no")); 1493 st->print_cr(" Can use 16M pages dynamically with shared memory: %s", 1494 (g_multipage_support.can_use_16M_pages ? 
"yes" :"no")); 1495 st->print_cr(" Multipage error: %d", 1496 g_multipage_support.error); 1497 st->cr(); 1498 st->print_cr(" os::vm_page_size: %s", describe_pagesize(os::vm_page_size())); 1499 // not used in OpenJDK st->print_cr(" os::stack_page_size: %s", describe_pagesize(os::stack_page_size())); 1500 1501 // print out LDR_CNTRL because it affects the default page sizes 1502 const char* const ldr_cntrl = ::getenv("LDR_CNTRL"); 1503 st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>"); 1504 1505 // Print out EXTSHM because it is an unsupported setting. 1506 const char* const extshm = ::getenv("EXTSHM"); 1507 st->print_cr(" EXTSHM=%s.", extshm ? extshm : "<unset>"); 1508 if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) { 1509 st->print_cr(" *** Unsupported! Please remove EXTSHM from your environment! ***"); 1510 } 1511 1512 // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks. 1513 const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES"); 1514 st->print_cr(" AIXTHREAD_GUARDPAGES=%s.", 1515 aixthread_guardpages ? 
aixthread_guardpages : "<unset>"); 1516 1517 os::Aix::meminfo_t mi; 1518 if (os::Aix::get_meminfo(&mi)) { 1519 char buffer[256]; 1520 if (os::Aix::on_aix()) { 1521 st->print_cr("physical total : " SIZE_FORMAT, mi.real_total); 1522 st->print_cr("physical free : " SIZE_FORMAT, mi.real_free); 1523 st->print_cr("swap total : " SIZE_FORMAT, mi.pgsp_total); 1524 st->print_cr("swap free : " SIZE_FORMAT, mi.pgsp_free); 1525 } else { 1526 // PASE - Numbers are result of QWCRSSTS; they mean: 1527 // real_total: Sum of all system pools 1528 // real_free: always 0 1529 // pgsp_total: we take the size of the system ASP 1530 // pgsp_free: size of system ASP times percentage of system ASP unused 1531 st->print_cr("physical total : " SIZE_FORMAT, mi.real_total); 1532 st->print_cr("system asp total : " SIZE_FORMAT, mi.pgsp_total); 1533 st->print_cr("%% system asp used : " SIZE_FORMAT, 1534 mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f); 1535 } 1536 st->print_raw(buffer); 1537 } 1538 st->cr(); 1539 1540 // Print segments allocated with os::reserve_memory. 1541 st->print_cr("internal virtual memory regions used by vm:"); 1542 vmembk_print_on(st); 1543} 1544 1545// Get a string for the cpuinfo that is a summary of the cpu type 1546void os::get_summary_cpu_info(char* buf, size_t buflen) { 1547 // This looks good 1548 libperfstat::cpuinfo_t ci; 1549 if (libperfstat::get_cpuinfo(&ci)) { 1550 strncpy(buf, ci.version, buflen); 1551 } else { 1552 strncpy(buf, "AIX", buflen); 1553 } 1554} 1555 1556void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1557 st->print("CPU:"); 1558 st->print("total %d", os::processor_count()); 1559 // It's not safe to query number of active processors after crash. 
1560 // st->print("(active %d)", os::active_processor_count()); 1561 st->print(" %s", VM_Version::features()); 1562 st->cr(); 1563} 1564 1565static void print_signal_handler(outputStream* st, int sig, 1566 char* buf, size_t buflen); 1567 1568void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1569 st->print_cr("Signal Handlers:"); 1570 print_signal_handler(st, SIGSEGV, buf, buflen); 1571 print_signal_handler(st, SIGBUS , buf, buflen); 1572 print_signal_handler(st, SIGFPE , buf, buflen); 1573 print_signal_handler(st, SIGPIPE, buf, buflen); 1574 print_signal_handler(st, SIGXFSZ, buf, buflen); 1575 print_signal_handler(st, SIGILL , buf, buflen); 1576 print_signal_handler(st, SR_signum, buf, buflen); 1577 print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen); 1578 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 1579 print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen); 1580 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 1581 print_signal_handler(st, SIGTRAP, buf, buflen); 1582 print_signal_handler(st, SIGDANGER, buf, buflen); 1583} 1584 1585static char saved_jvm_path[MAXPATHLEN] = {0}; 1586 1587// Find the full path to the current module, libjvm.so. 1588void os::jvm_path(char *buf, jint buflen) { 1589 // Error checking. 1590 if (buflen < MAXPATHLEN) { 1591 assert(false, "must use a large-enough buffer"); 1592 buf[0] = '\0'; 1593 return; 1594 } 1595 // Lazy resolve the path to current module. 
1596 if (saved_jvm_path[0] != 0) { 1597 strcpy(buf, saved_jvm_path); 1598 return; 1599 } 1600 1601 Dl_info dlinfo; 1602 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 1603 assert(ret != 0, "cannot locate libjvm"); 1604 char* rp = realpath((char *)dlinfo.dli_fname, buf); 1605 assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?"); 1606 1607 strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path)); 1608 saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0'; 1609} 1610 1611void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1612 // no prefix required, not even "_" 1613} 1614 1615void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1616 // no suffix required 1617} 1618 1619//////////////////////////////////////////////////////////////////////////////// 1620// sun.misc.Signal support 1621 1622static volatile jint sigint_count = 0; 1623 1624static void 1625UserHandler(int sig, void *siginfo, void *context) { 1626 // 4511530 - sem_post is serialized and handled by the manager thread. When 1627 // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We 1628 // don't want to flood the manager thread with sem_post requests. 1629 if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1) 1630 return; 1631 1632 // Ctrl-C is pressed during error reporting, likely because the error 1633 // handler fails to abort. Let VM die immediately. 1634 if (sig == SIGINT && is_error_reported()) { 1635 os::die(); 1636 } 1637 1638 os::signal_notify(sig); 1639} 1640 1641void* os::user_handler() { 1642 return CAST_FROM_FN_PTR(void*, UserHandler); 1643} 1644 1645extern "C" { 1646 typedef void (*sa_handler_t)(int); 1647 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 1648} 1649 1650void* os::signal(int signal_number, void* handler) { 1651 struct sigaction sigAct, oldSigAct; 1652 1653 sigfillset(&(sigAct.sa_mask)); 1654 1655 // Do not block out synchronous signals in the signal handler. 
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;
static msemaphore* p_sig_msem = 0;

// Create the semaphore used to signal pending sun.misc.Signal requests:
// a Posix semaphore on AIX, a memory semaphore (in reserved memory) on PASE.
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}

// Post the signal semaphore. Best effort: a failure is only traced (once).
static void local_sem_post() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_post(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL);
    int rc = ::msem_unlock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
      warn_only_once = true;
    }
  }
}

// Wait on the signal semaphore. Best effort: a failure is only traced (once).
static void local_sem_wait() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_wait(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL); // must init before use
    int rc = ::msem_lock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
      warn_only_once = true;
    }
  }
}

void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}

// Record a pending signal and wake up the signal dispatcher thread.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}

// Scan the pending-signal counters and atomically consume one occurrence.
// Returns the signal number, or -1 if 'wait' is false and nothing is pending.
// If 'wait' is true, blocks on the signal semaphore, cooperating with the
// external-suspend protocol while blocked.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg ensures each pending occurrence is consumed exactly once
      // even with concurrent callers.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// We need to keep small simple bookkeeping for os::reserve_memory and friends.

#define VMEM_MAPPED  1
#define VMEM_SHMATED 2

struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
1830 size_t pagesize; // page size of area 1831 vmembk_t* next; 1832 1833 bool contains_addr(char* p) const { 1834 return p >= addr && p < (addr + size); 1835 } 1836 1837 bool contains_range(char* p, size_t s) const { 1838 return contains_addr(p) && contains_addr(p + s - 1); 1839 } 1840 1841 void print_on(outputStream* os) const { 1842 os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT 1843 " bytes, %d %s pages), %s", 1844 addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize), 1845 (type == VMEM_SHMATED ? "shmat" : "mmap") 1846 ); 1847 } 1848 1849 // Check that range is a sub range of memory block (or equal to memory block); 1850 // also check that range is fully page aligned to the page size if the block. 1851 void assert_is_valid_subrange(char* p, size_t s) const { 1852 if (!contains_range(p, s)) { 1853 trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub " 1854 "range of [" PTR_FORMAT " - " PTR_FORMAT "].", 1855 p, p + s, addr, addr + size); 1856 guarantee0(false); 1857 } 1858 if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) { 1859 trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not" 1860 " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize); 1861 guarantee0(false); 1862 } 1863 } 1864}; 1865 1866static struct { 1867 vmembk_t* first; 1868 MiscUtils::CritSect cs; 1869} vmem; 1870 1871static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) { 1872 vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t)); 1873 assert0(p); 1874 if (p) { 1875 MiscUtils::AutoCritSect lck(&vmem.cs); 1876 p->addr = addr; p->size = size; 1877 p->pagesize = pagesize; 1878 p->type = type; 1879 p->next = vmem.first; 1880 vmem.first = p; 1881 } 1882} 1883 1884static vmembk_t* vmembk_find(char* addr) { 1885 MiscUtils::AutoCritSect lck(&vmem.cs); 1886 for (vmembk_t* p = vmem.first; p; p = p->next) { 1887 if (p->addr <= addr && (p->addr + p->size) > addr) { 1888 return p; 1889 } 1890 } 1891 return NULL; 1892} 
1893 1894static void vmembk_remove(vmembk_t* p0) { 1895 MiscUtils::AutoCritSect lck(&vmem.cs); 1896 assert0(p0); 1897 assert0(vmem.first); // List should not be empty. 1898 for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) { 1899 if (*pp == p0) { 1900 *pp = p0->next; 1901 ::free(p0); 1902 return; 1903 } 1904 } 1905 assert0(false); // Not found? 1906} 1907 1908static void vmembk_print_on(outputStream* os) { 1909 MiscUtils::AutoCritSect lck(&vmem.cs); 1910 for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) { 1911 vmi->print_on(os); 1912 os->cr(); 1913 } 1914} 1915 1916// Reserve and attach a section of System V memory. 1917// If <requested_addr> is not NULL, function will attempt to attach the memory at the given 1918// address. Failing that, it will attach the memory anywhere. 1919// If <requested_addr> is NULL, function will attach the memory anywhere. 1920// 1921// <alignment_hint> is being ignored by this function. It is very probable however that the 1922// alignment requirements are met anyway, because shmat() attaches at 256M boundaries. 1923// Should this be not enogh, we can put more work into it. 1924static char* reserve_shmated_memory ( 1925 size_t bytes, 1926 char* requested_addr, 1927 size_t alignment_hint) { 1928 1929 trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress " 1930 PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...", 1931 bytes, requested_addr, alignment_hint); 1932 1933 // Either give me wish address or wish alignment but not both. 1934 assert0(!(requested_addr != NULL && alignment_hint != 0)); 1935 1936 // We must prevent anyone from attaching too close to the 1937 // BRK because that may cause malloc OOM. 1938 if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) { 1939 trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. " 1940 "Will attach anywhere.", requested_addr); 1941 // Act like the OS refused to attach there. 
1942 requested_addr = NULL; 1943 } 1944 1945 // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not 1946 // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead. 1947 if (os::Aix::on_pase_V5R4_or_older()) { 1948 ShouldNotReachHere(); 1949 } 1950 1951 // Align size of shm up to 64K to avoid errors if we later try to change the page size. 1952 const size_t size = align_size_up(bytes, SIZE_64K); 1953 1954 // Reserve the shared segment. 1955 int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR); 1956 if (shmid == -1) { 1957 trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno); 1958 return NULL; 1959 } 1960 1961 // Important note: 1962 // It is very important that we, upon leaving this function, do not leave a shm segment alive. 1963 // We must right after attaching it remove it from the system. System V shm segments are global and 1964 // survive the process. 1965 // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A). 1966 1967 struct shmid_ds shmbuf; 1968 memset(&shmbuf, 0, sizeof(shmbuf)); 1969 shmbuf.shm_pagesize = SIZE_64K; 1970 if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) { 1971 trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.", 1972 size / SIZE_64K, errno); 1973 // I want to know if this ever happens. 1974 assert(false, "failed to set page size for shmat"); 1975 } 1976 1977 // Now attach the shared segment. 1978 // Note that I attach with SHM_RND - which means that the requested address is rounded down, if 1979 // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address 1980 // were not a segment boundary. 1981 char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND); 1982 const int errno_shmat = errno; 1983 1984 // (A) Right after shmat and before handing shmat errors delete the shm segment. 
1985 if (::shmctl(shmid, IPC_RMID, NULL) == -1) { 1986 trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno); 1987 assert(false, "failed to remove shared memory segment!"); 1988 } 1989 1990 // Handle shmat error. If we failed to attach, just return. 1991 if (addr == (char*)-1) { 1992 trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat); 1993 return NULL; 1994 } 1995 1996 // Just for info: query the real page size. In case setting the page size did not 1997 // work (see above), the system may have given us something other then 4K (LDR_CNTRL). 1998 const size_t real_pagesize = os::Aix::query_pagesize(addr); 1999 if (real_pagesize != shmbuf.shm_pagesize) { 2000 trcVerbose("pagesize is, surprisingly, %h.", real_pagesize); 2001 } 2002 2003 if (addr) { 2004 trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)", 2005 addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize)); 2006 } else { 2007 if (requested_addr != NULL) { 2008 trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr); 2009 } else { 2010 trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size); 2011 } 2012 } 2013 2014 // book-keeping 2015 vmembk_add(addr, size, real_pagesize, VMEM_SHMATED); 2016 assert0(is_aligned_to(addr, os::vm_page_size())); 2017 2018 return addr; 2019} 2020 2021static bool release_shmated_memory(char* addr, size_t size) { 2022 2023 trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2024 addr, addr + size - 1); 2025 2026 bool rc = false; 2027 2028 // TODO: is there a way to verify shm size without doing bookkeeping? 
2029 if (::shmdt(addr) != 0) { 2030 trcVerbose("error (%d).", errno); 2031 } else { 2032 trcVerbose("ok."); 2033 rc = true; 2034 } 2035 return rc; 2036} 2037 2038static bool uncommit_shmated_memory(char* addr, size_t size) { 2039 trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2040 addr, addr + size - 1); 2041 2042 const bool rc = my_disclaim64(addr, size); 2043 2044 if (!rc) { 2045 trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size); 2046 return false; 2047 } 2048 return true; 2049} 2050 2051//////////////////////////////// mmap-based routines ///////////////////////////////// 2052 2053// Reserve memory via mmap. 2054// If <requested_addr> is given, an attempt is made to attach at the given address. 2055// Failing that, memory is allocated at any address. 2056// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to 2057// allocate at an address aligned with the given alignment. Failing that, memory 2058// is aligned anywhere. 2059static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 2060 trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", " 2061 "alignment_hint " UINTX_FORMAT "...", 2062 bytes, requested_addr, alignment_hint); 2063 2064 // If a wish address is given, but not aligned to 4K page boundary, mmap will fail. 2065 if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) { 2066 trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr); 2067 return NULL; 2068 } 2069 2070 // We must prevent anyone from attaching too close to the 2071 // BRK because that may cause malloc OOM. 2072 if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) { 2073 trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. " 2074 "Will attach anywhere.", requested_addr); 2075 // Act like the OS refused to attach there. 
2076 requested_addr = NULL; 2077 } 2078 2079 // Specify one or the other but not both. 2080 assert0(!(requested_addr != NULL && alignment_hint > 0)); 2081 2082 // In 64K mode, we claim the global page size (os::vm_page_size()) 2083 // is 64K. This is one of the few points where that illusion may 2084 // break, because mmap() will always return memory aligned to 4K. So 2085 // we must ensure we only ever return memory aligned to 64k. 2086 if (alignment_hint) { 2087 alignment_hint = lcm(alignment_hint, os::vm_page_size()); 2088 } else { 2089 alignment_hint = os::vm_page_size(); 2090 } 2091 2092 // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode). 2093 const size_t size = align_size_up(bytes, os::vm_page_size()); 2094 2095 // alignment: Allocate memory large enough to include an aligned range of the right size and 2096 // cut off the leading and trailing waste pages. 2097 assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above 2098 const size_t extra_size = size + alignment_hint; 2099 2100 // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to 2101 // later use msync(MS_INVALIDATE) (see os::uncommit_memory). 2102 int flags = MAP_ANONYMOUS | MAP_SHARED; 2103 2104 // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what 2105 // it means if wishaddress is given but MAP_FIXED is not set. 2106 // 2107 // Important! Behaviour differs depending on whether SPEC1170 mode is active or not. 2108 // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings. 2109 // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will 2110 // get clobbered. 
2111 if (requested_addr != NULL) { 2112 if (!os::Aix::xpg_sus_mode()) { // not SPEC1170 Behaviour 2113 flags |= MAP_FIXED; 2114 } 2115 } 2116 2117 char* addr = (char*)::mmap(requested_addr, extra_size, 2118 PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0); 2119 2120 if (addr == MAP_FAILED) { 2121 trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno); 2122 return NULL; 2123 } 2124 2125 // Handle alignment. 2126 char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint); 2127 const size_t waste_pre = addr_aligned - addr; 2128 char* const addr_aligned_end = addr_aligned + size; 2129 const size_t waste_post = extra_size - waste_pre - size; 2130 if (waste_pre > 0) { 2131 ::munmap(addr, waste_pre); 2132 } 2133 if (waste_post > 0) { 2134 ::munmap(addr_aligned_end, waste_post); 2135 } 2136 addr = addr_aligned; 2137 2138 if (addr) { 2139 trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)", 2140 addr, addr + bytes, bytes); 2141 } else { 2142 if (requested_addr != NULL) { 2143 trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr); 2144 } else { 2145 trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes); 2146 } 2147 } 2148 2149 // bookkeeping 2150 vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED); 2151 2152 // Test alignment, see above. 
2153 assert0(is_aligned_to(addr, os::vm_page_size())); 2154 2155 return addr; 2156} 2157 2158static bool release_mmaped_memory(char* addr, size_t size) { 2159 assert0(is_aligned_to(addr, os::vm_page_size())); 2160 assert0(is_aligned_to(size, os::vm_page_size())); 2161 2162 trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2163 addr, addr + size - 1); 2164 bool rc = false; 2165 2166 if (::munmap(addr, size) != 0) { 2167 trcVerbose("failed (%d)\n", errno); 2168 rc = false; 2169 } else { 2170 trcVerbose("ok."); 2171 rc = true; 2172 } 2173 2174 return rc; 2175} 2176 2177static bool uncommit_mmaped_memory(char* addr, size_t size) { 2178 2179 assert0(is_aligned_to(addr, os::vm_page_size())); 2180 assert0(is_aligned_to(size, os::vm_page_size())); 2181 2182 trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2183 addr, addr + size - 1); 2184 bool rc = false; 2185 2186 // Uncommit mmap memory with msync MS_INVALIDATE. 2187 if (::msync(addr, size, MS_INVALIDATE) != 0) { 2188 trcVerbose("failed (%d)\n", errno); 2189 rc = false; 2190 } else { 2191 trcVerbose("ok."); 2192 rc = true; 2193 } 2194 2195 return rc; 2196} 2197 2198int os::vm_page_size() { 2199 // Seems redundant as all get out. 2200 assert(os::Aix::page_size() != -1, "must call os::init"); 2201 return os::Aix::page_size(); 2202} 2203 2204// Aix allocates memory by pages. 
2205int os::vm_allocation_granularity() { 2206 assert(os::Aix::page_size() != -1, "must call os::init"); 2207 return os::Aix::page_size(); 2208} 2209 2210#ifdef PRODUCT 2211static void warn_fail_commit_memory(char* addr, size_t size, bool exec, 2212 int err) { 2213 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 2214 ", %d) failed; error='%s' (errno=%d)", addr, size, exec, 2215 strerror(err), err); 2216} 2217#endif 2218 2219void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 2220 const char* mesg) { 2221 assert(mesg != NULL, "mesg must be specified"); 2222 if (!pd_commit_memory(addr, size, exec)) { 2223 // Add extra info in product mode for vm_exit_out_of_memory(): 2224 PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);) 2225 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 2226 } 2227} 2228 2229bool os::pd_commit_memory(char* addr, size_t size, bool exec) { 2230 2231 assert(is_aligned_to(addr, os::vm_page_size()), 2232 "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2233 p2i(addr), os::vm_page_size()); 2234 assert(is_aligned_to(size, os::vm_page_size()), 2235 "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2236 size, os::vm_page_size()); 2237 2238 vmembk_t* const vmi = vmembk_find(addr); 2239 guarantee0(vmi); 2240 vmi->assert_is_valid_subrange(addr, size); 2241 2242 trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1); 2243 2244 if (UseExplicitCommit) { 2245 // AIX commits memory on touch. So, touch all pages to be committed. 
2246 for (char* p = addr; p < (addr + size); p += SIZE_4K) { 2247 *p = '\0'; 2248 } 2249 } 2250 2251 return true; 2252} 2253 2254bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { 2255 return pd_commit_memory(addr, size, exec); 2256} 2257 2258void os::pd_commit_memory_or_exit(char* addr, size_t size, 2259 size_t alignment_hint, bool exec, 2260 const char* mesg) { 2261 // Alignment_hint is ignored on this OS. 2262 pd_commit_memory_or_exit(addr, size, exec, mesg); 2263} 2264 2265bool os::pd_uncommit_memory(char* addr, size_t size) { 2266 assert(is_aligned_to(addr, os::vm_page_size()), 2267 "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2268 p2i(addr), os::vm_page_size()); 2269 assert(is_aligned_to(size, os::vm_page_size()), 2270 "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2271 size, os::vm_page_size()); 2272 2273 // Dynamically do different things for mmap/shmat. 2274 const vmembk_t* const vmi = vmembk_find(addr); 2275 guarantee0(vmi); 2276 vmi->assert_is_valid_subrange(addr, size); 2277 2278 if (vmi->type == VMEM_SHMATED) { 2279 return uncommit_shmated_memory(addr, size); 2280 } else { 2281 return uncommit_mmaped_memory(addr, size); 2282 } 2283} 2284 2285bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 2286 // Do not call this; no need to commit stack pages on AIX. 2287 ShouldNotReachHere(); 2288 return true; 2289} 2290 2291bool os::remove_stack_guard_pages(char* addr, size_t size) { 2292 // Do not call this; no need to commit stack pages on AIX. 
2293 ShouldNotReachHere(); 2294 return true; 2295} 2296 2297void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2298} 2299 2300void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { 2301} 2302 2303void os::numa_make_global(char *addr, size_t bytes) { 2304} 2305 2306void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2307} 2308 2309bool os::numa_topology_changed() { 2310 return false; 2311} 2312 2313size_t os::numa_get_groups_num() { 2314 return 1; 2315} 2316 2317int os::numa_get_group_id() { 2318 return 0; 2319} 2320 2321size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2322 if (size > 0) { 2323 ids[0] = 0; 2324 return 1; 2325 } 2326 return 0; 2327} 2328 2329bool os::get_page_info(char *start, page_info* info) { 2330 return false; 2331} 2332 2333char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) { 2334 return end; 2335} 2336 2337// Reserves and attaches a shared memory segment. 2338// Will assert if a wish address is given and could not be obtained. 2339char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 2340 2341 // All other Unices do a mmap(MAP_FIXED) if the addr is given, 2342 // thereby clobbering old mappings at that place. That is probably 2343 // not intended, never used and almost certainly an error were it 2344 // ever be used this way (to try attaching at a specified address 2345 // without clobbering old mappings an alternate API exists, 2346 // os::attempt_reserve_memory_at()). 2347 // Instead of mimicking the dangerous coding of the other platforms, here I 2348 // just ignore the request address (release) or assert(debug). 2349 assert0(requested_addr == NULL); 2350 2351 // Always round to os::vm_page_size(), which may be larger than 4K. 2352 bytes = align_size_up(bytes, os::vm_page_size()); 2353 const size_t alignment_hint0 = 2354 alignment_hint ? 
align_size_up(alignment_hint, os::vm_page_size()) : 0; 2355 2356 // In 4K mode always use mmap. 2357 // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted. 2358 if (os::vm_page_size() == SIZE_4K) { 2359 return reserve_mmaped_memory(bytes, requested_addr, alignment_hint); 2360 } else { 2361 if (bytes >= Use64KPagesThreshold) { 2362 return reserve_shmated_memory(bytes, requested_addr, alignment_hint); 2363 } else { 2364 return reserve_mmaped_memory(bytes, requested_addr, alignment_hint); 2365 } 2366 } 2367} 2368 2369bool os::pd_release_memory(char* addr, size_t size) { 2370 2371 // Dynamically do different things for mmap/shmat. 2372 vmembk_t* const vmi = vmembk_find(addr); 2373 guarantee0(vmi); 2374 2375 // Always round to os::vm_page_size(), which may be larger than 4K. 2376 size = align_size_up(size, os::vm_page_size()); 2377 addr = (char *)align_ptr_up(addr, os::vm_page_size()); 2378 2379 bool rc = false; 2380 bool remove_bookkeeping = false; 2381 if (vmi->type == VMEM_SHMATED) { 2382 // For shmatted memory, we do: 2383 // - If user wants to release the whole range, release the memory (shmdt). 2384 // - If user only wants to release a partial range, uncommit (disclaim) that 2385 // range. That way, at least, we do not use memory anymore (bust still page 2386 // table space). 2387 vmi->assert_is_valid_subrange(addr, size); 2388 if (addr == vmi->addr && size == vmi->size) { 2389 rc = release_shmated_memory(addr, size); 2390 remove_bookkeeping = true; 2391 } else { 2392 rc = uncommit_shmated_memory(addr, size); 2393 } 2394 } else { 2395 // User may unmap partial regions but region has to be fully contained. 
2396#ifdef ASSERT 2397 vmi->assert_is_valid_subrange(addr, size); 2398#endif 2399 rc = release_mmaped_memory(addr, size); 2400 remove_bookkeeping = true; 2401 } 2402 2403 // update bookkeeping 2404 if (rc && remove_bookkeeping) { 2405 vmembk_remove(vmi); 2406 } 2407 2408 return rc; 2409} 2410 2411static bool checked_mprotect(char* addr, size_t size, int prot) { 2412 2413 // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will 2414 // not tell me if protection failed when trying to protect an un-protectable range. 2415 // 2416 // This means if the memory was allocated using shmget/shmat, protection wont work 2417 // but mprotect will still return 0: 2418 // 2419 // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm 2420 2421 bool rc = ::mprotect(addr, size, prot) == 0 ? true : false; 2422 2423 if (!rc) { 2424 const char* const s_errno = strerror(errno); 2425 warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno); 2426 return false; 2427 } 2428 2429 // mprotect success check 2430 // 2431 // Mprotect said it changed the protection but can I believe it? 2432 // 2433 // To be sure I need to check the protection afterwards. Try to 2434 // read from protected memory and check whether that causes a segfault. 2435 // 2436 if (!os::Aix::xpg_sus_mode()) { 2437 2438 if (CanUseSafeFetch32()) { 2439 2440 const bool read_protected = 2441 (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 && 2442 SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false; 2443 2444 if (prot & PROT_READ) { 2445 rc = !read_protected; 2446 } else { 2447 rc = read_protected; 2448 } 2449 2450 if (!rc) { 2451 if (os::Aix::on_pase()) { 2452 // There is an issue on older PASE systems where mprotect() will return success but the 2453 // memory will not be protected. 
2454 // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible 2455 // machines; we only see it rarely, when using mprotect() to protect the guard page of 2456 // a stack. It is an OS error. 2457 // 2458 // A valid strategy is just to try again. This usually works. :-/ 2459 2460 ::usleep(1000); 2461 if (::mprotect(addr, size, prot) == 0) { 2462 const bool read_protected_2 = 2463 (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 && 2464 SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false; 2465 rc = true; 2466 } 2467 } 2468 } 2469 } 2470 } 2471 2472 assert(rc == true, "mprotect failed."); 2473 2474 return rc; 2475} 2476 2477// Set protections specified 2478bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) { 2479 unsigned int p = 0; 2480 switch (prot) { 2481 case MEM_PROT_NONE: p = PROT_NONE; break; 2482 case MEM_PROT_READ: p = PROT_READ; break; 2483 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; 2484 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; 2485 default: 2486 ShouldNotReachHere(); 2487 } 2488 // is_committed is unused. 2489 return checked_mprotect(addr, size, p); 2490} 2491 2492bool os::guard_memory(char* addr, size_t size) { 2493 return checked_mprotect(addr, size, PROT_NONE); 2494} 2495 2496bool os::unguard_memory(char* addr, size_t size) { 2497 return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC); 2498} 2499 2500// Large page support 2501 2502static size_t _large_page_size = 0; 2503 2504// Enable large page support if OS allows that. 2505void os::large_page_init() { 2506 return; // Nothing to do. See query_multipage_support and friends. 2507} 2508 2509char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) { 2510 // reserve_memory_special() is used to allocate large paged memory. 
On AIX, we implement 2511 // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()), 2512 // so this is not needed. 2513 assert(false, "should not be called on AIX"); 2514 return NULL; 2515} 2516 2517bool os::release_memory_special(char* base, size_t bytes) { 2518 // Detaching the SHM segment will also delete it, see reserve_memory_special(). 2519 Unimplemented(); 2520 return false; 2521} 2522 2523size_t os::large_page_size() { 2524 return _large_page_size; 2525} 2526 2527bool os::can_commit_large_page_memory() { 2528 // Does not matter, we do not support huge pages. 2529 return false; 2530} 2531 2532bool os::can_execute_large_page_memory() { 2533 // Does not matter, we do not support huge pages. 2534 return false; 2535} 2536 2537// Reserve memory at an arbitrary address, only if that area is 2538// available (and not reserved for something else). 2539char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 2540 char* addr = NULL; 2541 2542 // Always round to os::vm_page_size(), which may be larger than 4K. 2543 bytes = align_size_up(bytes, os::vm_page_size()); 2544 2545 // In 4K mode always use mmap. 2546 // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted. 
2547 if (os::vm_page_size() == SIZE_4K) { 2548 return reserve_mmaped_memory(bytes, requested_addr, 0); 2549 } else { 2550 if (bytes >= Use64KPagesThreshold) { 2551 return reserve_shmated_memory(bytes, requested_addr, 0); 2552 } else { 2553 return reserve_mmaped_memory(bytes, requested_addr, 0); 2554 } 2555 } 2556 2557 return addr; 2558} 2559 2560size_t os::read(int fd, void *buf, unsigned int nBytes) { 2561 return ::read(fd, buf, nBytes); 2562} 2563 2564size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 2565 return ::pread(fd, buf, nBytes, offset); 2566} 2567 2568void os::naked_short_sleep(jlong ms) { 2569 struct timespec req; 2570 2571 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 2572 req.tv_sec = 0; 2573 if (ms > 0) { 2574 req.tv_nsec = (ms % 1000) * 1000000; 2575 } 2576 else { 2577 req.tv_nsec = 1; 2578 } 2579 2580 nanosleep(&req, NULL); 2581 2582 return; 2583} 2584 2585// Sleep forever; naked call to OS-specific sleep; use with CAUTION 2586void os::infinite_sleep() { 2587 while (true) { // sleep forever ... 2588 ::sleep(100); // ... 100 seconds at a time 2589 } 2590} 2591 2592// Used to convert frequent JVM_Yield() to nops 2593bool os::dont_yield() { 2594 return DontYieldALot; 2595} 2596 2597void os::naked_yield() { 2598 sched_yield(); 2599} 2600 2601//////////////////////////////////////////////////////////////////////////////// 2602// thread priority support 2603 2604// From AIX manpage to pthread_setschedparam 2605// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp? 2606// topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm): 2607// 2608// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the 2609// range from 40 to 80, where 40 is the least favored priority and 80 2610// is the most favored." 2611// 2612// (Actually, I doubt this even has an impact on AIX, as we do kernel 2613// scheduling there; however, this still leaves iSeries.) 
2614// 2615// We use the same values for AIX and PASE. 2616int os::java_to_os_priority[CriticalPriority + 1] = { 2617 54, // 0 Entry should never be used 2618 2619 55, // 1 MinPriority 2620 55, // 2 2621 56, // 3 2622 2623 56, // 4 2624 57, // 5 NormPriority 2625 57, // 6 2626 2627 58, // 7 2628 58, // 8 2629 59, // 9 NearMaxPriority 2630 2631 60, // 10 MaxPriority 2632 2633 60 // 11 CriticalPriority 2634}; 2635 2636OSReturn os::set_native_priority(Thread* thread, int newpri) { 2637 if (!UseThreadPriorities) return OS_OK; 2638 pthread_t thr = thread->osthread()->pthread_id(); 2639 int policy = SCHED_OTHER; 2640 struct sched_param param; 2641 param.sched_priority = newpri; 2642 int ret = pthread_setschedparam(thr, policy, ¶m); 2643 2644 if (ret != 0) { 2645 trcVerbose("Could not change priority for thread %d to %d (error %d, %s)", 2646 (int)thr, newpri, ret, strerror(ret)); 2647 } 2648 return (ret == 0) ? OS_OK : OS_ERR; 2649} 2650 2651OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) { 2652 if (!UseThreadPriorities) { 2653 *priority_ptr = java_to_os_priority[NormPriority]; 2654 return OS_OK; 2655 } 2656 pthread_t thr = thread->osthread()->pthread_id(); 2657 int policy = SCHED_OTHER; 2658 struct sched_param param; 2659 int ret = pthread_getschedparam(thr, &policy, ¶m); 2660 *priority_ptr = param.sched_priority; 2661 2662 return (ret == 0) ? OS_OK : OS_ERR; 2663} 2664 2665// Hint to the underlying OS that a task switch would not be good. 2666// Void return because it's a hint and can fail. 2667void os::hint_no_preempt() {} 2668 2669//////////////////////////////////////////////////////////////////////////////// 2670// suspend/resume support 2671 2672// the low-level signal-based suspend/resume support is a remnant from the 2673// old VM-suspension that used to be for java-suspension, safepoints etc, 2674// within hotspot. 
// Now there is a single use-case for this:
// - calling get_thread_pc() on the VMThread by the flat-profiler task
//   that runs in the watcher thread.
// The remaining code is greatly simplified from the more general suspension
// code that used to be used.
//
// The protocol is quite simple:
// - suspend:
//   - sends a signal to the target thread
//   - polls the suspend state of the osthread using a yield loop
//   - target thread signal handler (SR_handler) sets suspend state
//     and blocks in sigsuspend until continued
// - resume:
//   - sets target osthread state to continue
//   - sends signal to end the sigsuspend loop in the SR_handler
//
// Note that the SR_lock plays no role in this suspend/resume protocol.
//

// Drop the saved signal context once the thread is running again.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}

// Stash the signal context so get_thread_pc() can inspect the suspended thread.
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}

//
// Handler function invoked when a thread's execution is suspended or
// resumed. We have to be careful that only async-safe functions are
// called here (Note: most pthread functions are not async safe and
// should be avoided.)
//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
//
// Currently only ever called on the VMThread and JavaThreads (PC sampling).
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}

// Choose and install the suspend/resume signal handler.
// Returns 0 on success, -1 if sigaction() failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): the handler is installed through sa_handler with a cast even
  // though SA_SIGINFO is set (which documents sa_sigaction as the matching
  // field); verify this is the intended AIX idiom before changing it.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}

static int SR_finalize() {
  return 0;
}

// Deliver the suspend/resume signal to the target thread.
// Returns the pthread_kill status (0 on success).
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Wake a thread previously suspended via do_suspend(); keeps signalling
// until the target reports SR_RUNNING.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

2897/////////////////////////////////////////////////////////////////////////////////// 2898// signal handling (except suspend/resume) 2899 2900// This routine may be used by user applications as a "hook" to catch signals. 2901// The user-defined signal handler must pass unrecognized signals to this 2902// routine, and if it returns true (non-zero), then the signal handler must 2903// return immediately. If the flag "abort_if_unrecognized" is true, then this 2904// routine will never retun false (zero), but instead will execute a VM panic 2905// routine kill the process. 2906// 2907// If this routine returns false, it is OK to call it again. This allows 2908// the user-defined signal handler to perform checks either before or after 2909// the VM performs its own checks. Naturally, the user code would be making 2910// a serious error if it tried to handle an exception (such as a null check 2911// or breakpoint) that the VM was generating for its own correct operation. 2912// 2913// This routine may recognize any of the following kinds of signals: 2914// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1. 2915// It should be consulted by handlers for any of those signals. 2916// 2917// The caller of this routine must pass in the three arguments supplied 2918// to the function referred to in the "sa_sigaction" (not the "sa_handler") 2919// field of the structure passed to sigaction(). This routine assumes that 2920// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART. 2921// 2922// Note that the VM will print warnings if it detects conflicting signal 2923// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers". 
2924// 2925extern "C" JNIEXPORT int 2926JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized); 2927 2928// Set thread signal mask (for some reason on AIX sigthreadmask() seems 2929// to be the thing to call; documentation is not terribly clear about whether 2930// pthread_sigmask also works, and if it does, whether it does the same. 2931bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) { 2932 const int rc = ::pthread_sigmask(how, set, oset); 2933 // return value semantics differ slightly for error case: 2934 // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno 2935 // (so, pthread_sigmask is more theadsafe for error handling) 2936 // But success is always 0. 2937 return rc == 0 ? true : false; 2938} 2939 2940// Function to unblock all signals which are, according 2941// to POSIX, typical program error signals. If they happen while being blocked, 2942// they typically will bring down the process immediately. 2943bool unblock_program_error_signals() { 2944 sigset_t set; 2945 ::sigemptyset(&set); 2946 ::sigaddset(&set, SIGILL); 2947 ::sigaddset(&set, SIGBUS); 2948 ::sigaddset(&set, SIGFPE); 2949 ::sigaddset(&set, SIGSEGV); 2950 return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL); 2951} 2952 2953// Renamed from 'signalHandler' to avoid collision with other shared libs. 2954void javaSignalHandler(int sig, siginfo_t* info, void* uc) { 2955 assert(info != NULL && uc != NULL, "it must be old kernel"); 2956 2957 // Never leave program error signals blocked; 2958 // on all our platforms they would bring down the process immediately when 2959 // getting raised while being blocked. 2960 unblock_program_error_signals(); 2961 2962 int orig_errno = errno; // Preserve errno value over signal handler. 
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;

// Returns the handler that was active for 'sig' before the VM installed its
// own: first ask libjsig (if loaded), then fall back to the handler the VM
// saved when it installed its own. May return NULL.
struct sigaction* os::Aix::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if (libjsig_is_loaded) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the chained (pre-existing) handler 'actp' for 'sig', honoring its
// sa_flags (SA_SIGINFO, SA_NODEFER, SA_RESETHAND) and its signal mask.
// Returns true if a chained handler took care of the signal; false if the
// chained handler is SIG_DFL, so the VM should treat the signal itself.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// If signal-chaining is enabled (-XX:+UseSignalChaining), forward 'sig' to
// the pre-existing handler. Returns true if a chained handler handled it.
bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Returns the sigaction the VM saved for 'sig' before overwriting it,
// or NULL if none was saved.
struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
  if (sigismember(&sigs, sig)) {
    return &sigact[sig];
  }
  return NULL;
}

// Remember the pre-existing handler for 'sig' so it can be chained later.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}

// for diagnostic
int sigflags[NSIG];

// Returns the sa_flags the VM used when it installed its handler for 'sig'.
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}

void
os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}

// Install the VM's handler for 'sig' (set_installed == true) or restore
// SIG_DFL (set_installed == false). Respects pre-existing user handlers:
// leaves them in place (-XX:+AllowUserSignalHandlers), saves them for
// chaining (-XX:+UseSignalChaining), or aborts with fatal() otherwise.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the handler that was in place just before our sigaction() call
  // to detect a racing installation by some other party.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}

// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}

// Format the name of the library containing 'handler' into 'buf'; falls
// back to printing the raw handler address if no library is found.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Print one line describing the currently installed handler for 'sig'
// (handler name or address, sa_mask, sa_flags) to 'st'.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}

// Check each signal at most once; completion is recorded in check_signal_done.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for 'sig' is still the one
// the VM expects; print a warning (once per signal) if a JNI application
// replaced it or changed its sa_flags.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              ---                 AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == SIZE_4K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = SIZE_64K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = SIZE_4K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = SIZE_4K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = SIZE_64K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // Short-wire stack page size to base page size; if that works, we just remove
  // that stack page size altogether.
  Aix::_stack_page_size = Aix::_page_size;

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();
}

// This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {

  if (os::Aix::on_pase()) {
    trcVerbose("Running on PASE.");
  } else {
    trcVerbose("Running on AIX (not PASE).");
  }

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();
  if (Verbose) {
    trcVerbose("Loaded Libraries: ");
    LoadedLibraries::print(tty);
  }

  const int page_size = Aix::page_size();
  const int map_size = page_size;

  address map_address = (address) MAP_FAILED;
  const int prot  = PROT_READ;
  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;

  // Use optimized addresses for the polling page,
  // e.g. map it to a special 32-bit address.
  if (OptimizePollingPageLocation) {
    // architecture-specific list of address wishes:
    address address_wishes[] = {
      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
      // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. we can load these addresses
      // with a single ppc_lis instruction.
      (address) 0x30000000, (address) 0x31000000,
      (address) 0x32000000, (address) 0x33000000,
      (address) 0x40000000, (address) 0x41000000,
      (address) 0x42000000, (address) 0x43000000,
      (address) 0x50000000, (address) 0x51000000,
      (address) 0x52000000, (address) 0x53000000,
      (address) 0x60000000, (address) 0x61000000,
      (address) 0x62000000, (address) 0x63000000
    };
    int address_wishes_length = sizeof(address_wishes)/sizeof(address);

    // iterate over the list of address wishes:
    for (int i=0; i<address_wishes_length; i++) {
      // Try to map with current address wish.
      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
      // fail if the address is already mapped.
      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
                                     map_size, prot,
                                     flags | MAP_FIXED,
                                     -1, 0);
      trcVerbose("SafePoint Polling Page address: %p (wish) => %p",
                 address_wishes[i], map_address + (ssize_t)page_size);

      if (map_address + (ssize_t)page_size == address_wishes[i]) {
        // Map succeeded and map_address is at wished address, exit loop.
        break;
      }

      if (map_address != (address) MAP_FAILED) {
        // Map succeeded, but polling_page is not at wished address, unmap and continue.
        ::munmap(map_address, map_size);
        map_address = (address) MAP_FAILED;
      }
      // Map failed, continue loop.
    }
  } // end OptimizePollingPageLocation

  if (map_address == (address) MAP_FAILED) {
    // Fall back to letting the OS choose the polling page address.
    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  }
  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  os::set_polling_page(map_address);

  if (!UseMembar) {
    // Allocate the page used for thread-state serialization when UseMembar is off.
    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

    trcVerbose("Memory Serialize Page address: %p - %p, size %IX (%IB)",
               mem_serialize_page, mem_serialize_page + Aix::page_size(),
               Aix::page_size(), Aix::page_size());
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Aix::signal_sets_init();
  Aix::install_signal_handlers();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
                                    JavaThread::stack_guard_zone_size() +
                                    JavaThread::stack_shadow_zone_size() +
                                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());

  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  os::Aix::min_stack_allowed / K);
    return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  // Note that this can be 0, if no default stacksize was set.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));

  if (UseNUMA) {
    UseNUMA = false;
    warning("NUMA optimizations are not available on this OS.");
  }

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // At exit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
    fatal("Could not disable polling page");
  }
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  // Changed according to os_linux.cpp.
  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
  }
};

// Number of CPUs currently online, as reported by sysconf().
int os::active_processor_count() {
  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  return online_cpus;
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Suspend the target thread, run do_task() on its saved context, resume it.
// If the suspend fails, do_task() is not run.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

// Task that fetches the pc of a suspended thread (see os::get_thread_pc below).
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
3727 assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); 3728 assert(thread->is_VM_thread(), "Can only be called for VMThread"); 3729 3730 PcFetcher fetcher(thread); 3731 fetcher.run(); 3732 return fetcher.result(); 3733} 3734 3735//////////////////////////////////////////////////////////////////////////////// 3736// debug support 3737 3738bool os::find(address addr, outputStream* st) { 3739 3740 st->print(PTR_FORMAT ": ", addr); 3741 3742 loaded_module_t lm; 3743 if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL || 3744 LoadedLibraries::find_for_data_address(addr, &lm) != NULL) { 3745 st->print_cr("%s", lm.path); 3746 return true; 3747 } 3748 3749 return false; 3750} 3751 3752//////////////////////////////////////////////////////////////////////////////// 3753// misc 3754 3755// This does not do anything on Aix. This is basically a hook for being 3756// able to use structured exception handling (thread-local exception filters) 3757// on, e.g., Win32. 3758void 3759os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, 3760 JavaCallArguments* args, Thread* thread) { 3761 f(value, method, args, thread); 3762} 3763 3764void os::print_statistics() { 3765} 3766 3767bool os::message_box(const char* title, const char* message) { 3768 int i; 3769 fdStream err(defaultStream::error_fd()); 3770 for (i = 0; i < 78; i++) err.print_raw("="); 3771 err.cr(); 3772 err.print_raw_cr(title); 3773 for (i = 0; i < 78; i++) err.print_raw("-"); 3774 err.cr(); 3775 err.print_raw_cr(message); 3776 for (i = 0; i < 78; i++) err.print_raw("="); 3777 err.cr(); 3778 3779 char buf[16]; 3780 // Prevent process from exiting upon "read error" without consuming all CPU 3781 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } 3782 3783 return buf[0] == 'y' || buf[0] == 'Y'; 3784} 3785 3786int os::stat(const char *path, struct stat *sbuf) { 3787 char pathbuf[MAX_PATH]; 3788 if (strlen(path) > MAX_PATH - 1) { 3789 errno = ENAMETOOLONG; 
3790 return -1; 3791 } 3792 os::native_path(strcpy(pathbuf, path)); 3793 return ::stat(pathbuf, sbuf); 3794} 3795 3796bool os::check_heap(bool force) { 3797 return true; 3798} 3799 3800// Is a (classpath) directory empty? 3801bool os::dir_is_empty(const char* path) { 3802 DIR *dir = NULL; 3803 struct dirent *ptr; 3804 3805 dir = opendir(path); 3806 if (dir == NULL) return true; 3807 3808 /* Scan the directory */ 3809 bool result = true; 3810 char buf[sizeof(struct dirent) + MAX_PATH]; 3811 while (result && (ptr = ::readdir(dir)) != NULL) { 3812 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 3813 result = false; 3814 } 3815 } 3816 closedir(dir); 3817 return result; 3818} 3819 3820// This code originates from JDK's sysOpen and open64_w 3821// from src/solaris/hpi/src/system_md.c 3822 3823int os::open(const char *path, int oflag, int mode) { 3824 3825 if (strlen(path) > MAX_PATH - 1) { 3826 errno = ENAMETOOLONG; 3827 return -1; 3828 } 3829 int fd; 3830 3831 fd = ::open64(path, oflag, mode); 3832 if (fd == -1) return -1; 3833 3834 // If the open succeeded, the file might still be a directory. 3835 { 3836 struct stat64 buf64; 3837 int ret = ::fstat64(fd, &buf64); 3838 int st_mode = buf64.st_mode; 3839 3840 if (ret != -1) { 3841 if ((st_mode & S_IFMT) == S_IFDIR) { 3842 errno = EISDIR; 3843 ::close(fd); 3844 return -1; 3845 } 3846 } else { 3847 ::close(fd); 3848 return -1; 3849 } 3850 } 3851 3852 // All file descriptors that are opened in the JVM and not 3853 // specifically destined for a subprocess should have the 3854 // close-on-exec flag set. If we don't set it, then careless 3rd 3855 // party native code might fork and exec without closing all 3856 // appropriate file descriptors (e.g. 
as we do in closeDescriptors in 3857 // UNIXProcess.c), and this in turn might: 3858 // 3859 // - cause end-of-file to fail to be detected on some file 3860 // descriptors, resulting in mysterious hangs, or 3861 // 3862 // - might cause an fopen in the subprocess to fail on a system 3863 // suffering from bug 1085341. 3864 // 3865 // (Yes, the default setting of the close-on-exec flag is a Unix 3866 // design flaw.) 3867 // 3868 // See: 3869 // 1085341: 32-bit stdio routines should support file descriptors >255 3870 // 4843136: (process) pipe file descriptor from Runtime.exec not being closed 3871 // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 3872#ifdef FD_CLOEXEC 3873 { 3874 int flags = ::fcntl(fd, F_GETFD); 3875 if (flags != -1) 3876 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); 3877 } 3878#endif 3879 3880 return fd; 3881} 3882 3883// create binary file, rewriting existing file if required 3884int os::create_binary_file(const char* path, bool rewrite_existing) { 3885 int oflags = O_WRONLY | O_CREAT; 3886 if (!rewrite_existing) { 3887 oflags |= O_EXCL; 3888 } 3889 return ::open64(path, oflags, S_IREAD | S_IWRITE); 3890} 3891 3892// return current position of file pointer 3893jlong os::current_file_offset(int fd) { 3894 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); 3895} 3896 3897// move file pointer to the specified offset 3898jlong os::seek_to_file_offset(int fd, jlong offset) { 3899 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); 3900} 3901 3902// This code originates from JDK's sysAvailable 3903// from src/solaris/hpi/src/native_threads/src/sys_api_td.c 3904 3905int os::available(int fd, jlong *bytes) { 3906 jlong cur, end; 3907 int mode; 3908 struct stat64 buf64; 3909 3910 if (::fstat64(fd, &buf64) >= 0) { 3911 mode = buf64.st_mode; 3912 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { 3913 int n; 3914 if (::ioctl(fd, FIONREAD, &n) >= 0) { 3915 *bytes = n; 3916 return 1; 3917 } 3918 } 3919 } 3920 if ((cur 
= ::lseek64(fd, 0L, SEEK_CUR)) == -1) { 3921 return 0; 3922 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) { 3923 return 0; 3924 } else if (::lseek64(fd, cur, SEEK_SET) == -1) { 3925 return 0; 3926 } 3927 *bytes = end - cur; 3928 return 1; 3929} 3930 3931// Map a block of memory. 3932char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 3933 char *addr, size_t bytes, bool read_only, 3934 bool allow_exec) { 3935 int prot; 3936 int flags = MAP_PRIVATE; 3937 3938 if (read_only) { 3939 prot = PROT_READ; 3940 flags = MAP_SHARED; 3941 } else { 3942 prot = PROT_READ | PROT_WRITE; 3943 flags = MAP_PRIVATE; 3944 } 3945 3946 if (allow_exec) { 3947 prot |= PROT_EXEC; 3948 } 3949 3950 if (addr != NULL) { 3951 flags |= MAP_FIXED; 3952 } 3953 3954 // Allow anonymous mappings if 'fd' is -1. 3955 if (fd == -1) { 3956 flags |= MAP_ANONYMOUS; 3957 } 3958 3959 char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags, 3960 fd, file_offset); 3961 if (mapped_address == MAP_FAILED) { 3962 return NULL; 3963 } 3964 return mapped_address; 3965} 3966 3967// Remap a block of memory. 3968char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 3969 char *addr, size_t bytes, bool read_only, 3970 bool allow_exec) { 3971 // same as map_memory() on this OS 3972 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 3973 allow_exec); 3974} 3975 3976// Unmap a block of memory. 3977bool os::pd_unmap_memory(char* addr, size_t bytes) { 3978 return munmap(addr, bytes) == 0; 3979} 3980 3981// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 3982// are used by JVM M&M and JVMTI to get user+sys or user CPU time 3983// of a thread. 3984// 3985// current_thread_cpu_time() and thread_cpu_time(Thread*) returns 3986// the fast estimate available on the platform. 
3987 3988jlong os::current_thread_cpu_time() { 3989 // return user + sys since the cost is the same 3990 const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */); 3991 assert(n >= 0, "negative CPU time"); 3992 return n; 3993} 3994 3995jlong os::thread_cpu_time(Thread* thread) { 3996 // consistent with what current_thread_cpu_time() returns 3997 const jlong n = os::thread_cpu_time(thread, true /* user + sys */); 3998 assert(n >= 0, "negative CPU time"); 3999 return n; 4000} 4001 4002jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4003 const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4004 assert(n >= 0, "negative CPU time"); 4005 return n; 4006} 4007 4008static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) { 4009 bool error = false; 4010 4011 jlong sys_time = 0; 4012 jlong user_time = 0; 4013 4014 // Reimplemented using getthrds64(). 4015 // 4016 // Works like this: 4017 // For the thread in question, get the kernel thread id. Then get the 4018 // kernel thread statistics using that id. 4019 // 4020 // This only works of course when no pthread scheduling is used, 4021 // i.e. there is a 1:1 relationship to kernel threads. 4022 // On AIX, see AIXTHREAD_SCOPE variable. 4023 4024 pthread_t pthtid = thread->osthread()->pthread_id(); 4025 4026 // retrieve kernel thread id for the pthread: 4027 tid64_t tid = 0; 4028 struct __pthrdsinfo pinfo; 4029 // I just love those otherworldly IBM APIs which force me to hand down 4030 // dummy buffers for stuff I dont care for... 
4031 char dummy[1]; 4032 int dummy_size = sizeof(dummy); 4033 if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo), 4034 dummy, &dummy_size) == 0) { 4035 tid = pinfo.__pi_tid; 4036 } else { 4037 tty->print_cr("pthread_getthrds_np failed."); 4038 error = true; 4039 } 4040 4041 // retrieve kernel timing info for that kernel thread 4042 if (!error) { 4043 struct thrdentry64 thrdentry; 4044 if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) { 4045 sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL; 4046 user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL; 4047 } else { 4048 tty->print_cr("pthread_getthrds_np failed."); 4049 error = true; 4050 } 4051 } 4052 4053 if (p_sys_time) { 4054 *p_sys_time = sys_time; 4055 } 4056 4057 if (p_user_time) { 4058 *p_user_time = user_time; 4059 } 4060 4061 if (error) { 4062 return false; 4063 } 4064 4065 return true; 4066} 4067 4068jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { 4069 jlong sys_time; 4070 jlong user_time; 4071 4072 if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) { 4073 return -1; 4074 } 4075 4076 return user_sys_cpu_time ? 
sys_time + user_time : user_time; 4077} 4078 4079void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4080 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 4081 info_ptr->may_skip_backward = false; // elapsed time not wall time 4082 info_ptr->may_skip_forward = false; // elapsed time not wall time 4083 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4084} 4085 4086void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4087 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 4088 info_ptr->may_skip_backward = false; // elapsed time not wall time 4089 info_ptr->may_skip_forward = false; // elapsed time not wall time 4090 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4091} 4092 4093bool os::is_thread_cpu_time_supported() { 4094 return true; 4095} 4096 4097// System loadavg support. Returns -1 if load average cannot be obtained. 4098// For now just return the system wide load average (no processor sets). 
4099int os::loadavg(double values[], int nelem) { 4100 4101 guarantee(nelem >= 0 && nelem <= 3, "argument error"); 4102 guarantee(values, "argument error"); 4103 4104 if (os::Aix::on_pase()) { 4105 4106 // AS/400 PASE: use libo4 porting library 4107 double v[3] = { 0.0, 0.0, 0.0 }; 4108 4109 if (libo4::get_load_avg(v, v + 1, v + 2)) { 4110 for (int i = 0; i < nelem; i ++) { 4111 values[i] = v[i]; 4112 } 4113 return nelem; 4114 } else { 4115 return -1; 4116 } 4117 4118 } else { 4119 4120 // AIX: use libperfstat 4121 libperfstat::cpuinfo_t ci; 4122 if (libperfstat::get_cpuinfo(&ci)) { 4123 for (int i = 0; i < nelem; i++) { 4124 values[i] = ci.loadavg[i]; 4125 } 4126 } else { 4127 return -1; 4128 } 4129 return nelem; 4130 } 4131} 4132 4133void os::pause() { 4134 char filename[MAX_PATH]; 4135 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4136 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4137 } else { 4138 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4139 } 4140 4141 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4142 if (fd != -1) { 4143 struct stat buf; 4144 ::close(fd); 4145 while (::stat(filename, &buf) == 0) { 4146 (void)::poll(NULL, 0, 100); 4147 } 4148 } else { 4149 trcVerbose("Could not open pause file '%s', continuing immediately.", filename); 4150 } 4151} 4152 4153bool os::Aix::is_primordial_thread() { 4154 if (pthread_self() == (pthread_t)1) { 4155 return true; 4156 } else { 4157 return false; 4158 } 4159} 4160 4161// OS recognitions (PASE/AIX, OS level) call this before calling any 4162// one of Aix::on_pase(), Aix::os_version() static 4163void os::Aix::initialize_os_info() { 4164 4165 assert(_on_pase == -1 && _os_version == 0, "already called."); 4166 4167 struct utsname uts; 4168 memset(&uts, 0, sizeof(uts)); 4169 strcpy(uts.sysname, "?"); 4170 if (::uname(&uts) == -1) { 4171 trcVerbose("uname failed (%d)", errno); 4172 guarantee(0, "Could not determine whether we run on AIX or PASE"); 4173 } else 
{ 4174 trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" " 4175 "node \"%s\" machine \"%s\"\n", 4176 uts.sysname, uts.version, uts.release, uts.nodename, uts.machine); 4177 const int major = atoi(uts.version); 4178 assert(major > 0, "invalid OS version"); 4179 const int minor = atoi(uts.release); 4180 assert(minor > 0, "invalid OS release"); 4181 _os_version = (major << 24) | (minor << 16); 4182 char ver_str[20] = {0}; 4183 char *name_str = "unknown OS"; 4184 if (strcmp(uts.sysname, "OS400") == 0) { 4185 // We run on AS/400 PASE. We do not support versions older than V5R4M0. 4186 _on_pase = 1; 4187 if (os_version_short() < 0x0504) { 4188 trcVerbose("OS/400 releases older than V5R4M0 not supported."); 4189 assert(false, "OS/400 release too old."); 4190 } 4191 name_str = "OS/400 (pase)"; 4192 jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor); 4193 } else if (strcmp(uts.sysname, "AIX") == 0) { 4194 // We run on AIX. We do not support versions older than AIX 5.3. 4195 _on_pase = 0; 4196 // Determine detailed AIX version: Version, Release, Modification, Fix Level. 4197 odmWrapper::determine_os_kernel_version(&_os_version); 4198 if (os_version_short() < 0x0503) { 4199 trcVerbose("AIX release older than AIX 5.3 not supported."); 4200 assert(false, "AIX release too old."); 4201 } 4202 name_str = "AIX"; 4203 jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u", 4204 major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF); 4205 } else { 4206 assert(false, name_str); 4207 } 4208 trcVerbose("We run on %s %s", name_str, ver_str); 4209 } 4210 4211 guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release"); 4212} // end: os::Aix::initialize_os_info() 4213 4214// Scan environment for important settings which might effect the VM. 4215// Trace out settings. Warn about invalid settings and/or correct them. 4216// 4217// Must run after os::Aix::initialue_os_info(). 
// Inspects environment variables known to influence VM behavior on AIX/PASE,
// records their settings (_extshm, _xpg_sus_mode), traces every value looked
// at, and aborts VM initialization for settings that are known to be
// incompatible (EXTSHM=ON unless AllowExtshm, XPG_SUS_ENV=ON).
void os::Aix::scan_environment() {

  char* p;
  int rc;  // NOTE(review): declared but never used in this function.

  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
  if (p && strcasecmp(p, "ON") == 0) {
    _extshm = 1;
    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
    if (!AllowExtshm) {
      // We allow under certain conditions the user to continue. However, we want this
      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
      // that the VM is not able to allocate 64k pages for the heap.
      // We do not want to run with reduced performance.
      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
    }
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  // Trace (but do not act on) settings that only matter on OS/400 PASE.
  if (os::Aix::on_pase()) {
    p = ::getenv("QIBM_MULTI_THREADED");
    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
  }

  p = ::getenv("LDR_CNTRL");
  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
  // On OS/400 V7R1 a TEXTPSIZE setting in LDR_CNTRL is known to cause
  // hangs/crashes; warn but continue.
  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
    if (p && ::strstr(p, "TEXTPSIZE")) {
      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
                 "you may experience hangs or crashes on OS/400 V7R1.");
    }
  }

  p = ::getenv("AIXTHREAD_GUARDPAGES");
  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");

} // end: os::Aix::scan_environment()

// PASE: initialize the libo4 library (PASE porting library).
// Failure is traced and asserted in debug builds, but execution continues.
void os::Aix::initialize_libo4() {
  guarantee(os::Aix::on_pase(), "OS/400 only.");
  if (!libo4::init()) {
    trcVerbose("libo4 initialization failed.");
    assert(false, "libo4 initialization failed");
  } else {
    trcVerbose("libo4 initialized.");
  }
}

// AIX: initialize the libperfstat library.
// Failure is traced and asserted in debug builds, but execution continues.
void os::Aix::initialize_libperfstat() {
  assert(os::Aix::on_aix(), "AIX only");
  if (!libperfstat::init()) {
    trcVerbose("libperfstat initialization failed.");
    assert(false, "libperfstat initialization failed");
  } else {
    trcVerbose("libperfstat initialized.");
  }
}

/////////////////////////////////////////////////////////////////////////////
// thread stack

// Function to query the current stack size using pthread_getthrds_np.
// Returns the stack base (highest usable address) and usable stack size of
// the calling thread. Either output pointer may be NULL.
// Returns false if the pthread library could not supply the information.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here.
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // Information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    assert0(false);
    trcVerbose("pthread_getthrds_np failed (%d)", rc);
    return false;
  }
  guarantee0(pinfo.__pi_stackend);

  // The following may happen when invoking pthread_getthrds_np on a pthread
  // running on a user provided stack (when handing down a stack to pthread
  // create, see pthread_attr_setstackaddr).
  // Not sure what to do then.

  guarantee0(pinfo.__pi_stacksize);

  // Note: we get three values from pthread_getthrds_np:
  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
  //
  // high addr    ---------------------
  //
  //    |         pthread internal data, like ~2K
  //    |
  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
  //    |
  //    |          padding to align the following AIX guard pages, if enabled.
  //    |
  //    V          ---------------------   __pi_stackaddr
  //
  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
  //

  // The stack base is the high end; the low end is rounded up to a page
  // boundary so the reported size never includes a partial page.
  address stack_base = (address)(pinfo.__pi_stackend);
  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
    os::vm_page_size());
  size_t stack_size = stack_base - stack_low_addr;

  if (p_stack_base) {
    *p_stack_base = stack_base;
  }

  if (p_stack_size) {
    *p_stack_size = stack_size;
  }

  return true;
}

// Get the current stack base from the OS (actually, the pthread library).
address os::current_stack_base() {
  address p;
  query_stack_dimensions(&p, 0);
  return p;
}

// Get the current stack size from the OS (actually, the pthread library).
size_t os::current_stack_size() {
  size_t s;
  query_stack_dimensions(0, &s);
  return s;
}

// Refer to the comments in os_solaris.cpp park-unpark.
// utility to compute the abstime argument to timedwait:
// millis is the relative timeout time
// abstime will be the absolute timeout time
// TODO: replace compute_abstime() with unpackTime()

static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
  // Negative timeouts are clamped to "now".
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  millis %= 1000;
  // Clamp to avoid overflowing tv_sec; some cond_timedwait implementations
  // reject absolute times too far in the future.
  if (seconds > 50000000) { // see man cond_timedwait(3T)
    seconds = 50000000;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  // Carry microsecond overflow into the seconds field.
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  // CAS loop: atomically swap _Event (0 or 1) to 0 and report the old value.
  for (;;) {
    const int v = _Event;
    guarantee ((v == 0) || (v == 1), "invariant");
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  }
}

void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  // Atomically decrement _Event: 1 -> 0 means a permit was consumed and we
  // return immediately; 0 -> -1 means we must block until unpark().
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant");
    ++ _nParked;
    // Re-check _Event under the mutex; loop to absorb spurious wakeups.
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    -- _nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant");
}

// Timed park: returns OS_OK if woken by unpark() (or if a permit was already
// available), OS_TIMEOUT if the relative timeout (millis) elapsed first.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  // Same CAS protocol as park(): consume a permit or transition to -1 (waiting).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // _Event >= 0 here means unpark() ran before the timeout fired.
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}

void os::PlatformEvent::unpark() {
  // Atomically raise _Event towards 1. If it is already 1 a permit exists
  // and there is nothing to do; if it was -1 a thread is (or is about to be)
  // blocked and must be signalled.
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): contrary to the original comment here, this code
      // signals *while still holding* the mutex (the unlock follows below).
      // That is correct POSIX usage; signalling under the lock guarantees
      // the waiter cannot miss the wakeup.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}


// JSR166
// -------------------------------------------------------

//
// The solaris and linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a a count.
// Park decrements count if > 0, else does a condvar wait. Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.
//

#define MAX_SECS 100000000
//
// This code is common to linux and solaris and will be moved to a
// common place in dolphin.
//
// The passed in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds. Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Given time is a 64-bit value and the time_t used in the timespec is only
// a signed-32-bit value (except on 64-bit Linux) we have to watch for
// overflow if times way in the future are given. Further on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100,000,000.
// As it will be 28 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
4597// 4598 4599static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) { 4600 assert (time > 0, "convertTime"); 4601 4602 struct timeval now; 4603 int status = gettimeofday(&now, NULL); 4604 assert(status == 0, "gettimeofday"); 4605 4606 time_t max_secs = now.tv_sec + MAX_SECS; 4607 4608 if (isAbsolute) { 4609 jlong secs = time / 1000; 4610 if (secs > max_secs) { 4611 absTime->tv_sec = max_secs; 4612 } 4613 else { 4614 absTime->tv_sec = secs; 4615 } 4616 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; 4617 } 4618 else { 4619 jlong secs = time / NANOSECS_PER_SEC; 4620 if (secs >= MAX_SECS) { 4621 absTime->tv_sec = max_secs; 4622 absTime->tv_nsec = 0; 4623 } 4624 else { 4625 absTime->tv_sec = now.tv_sec + secs; 4626 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; 4627 if (absTime->tv_nsec >= NANOSECS_PER_SEC) { 4628 absTime->tv_nsec -= NANOSECS_PER_SEC; 4629 ++absTime->tv_sec; // note: this must be <= max_secs 4630 } 4631 } 4632 } 4633 assert(absTime->tv_sec >= 0, "tv_sec < 0"); 4634 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs"); 4635 assert(absTime->tv_nsec >= 0, "tv_nsec < 0"); 4636 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec"); 4637} 4638 4639void Parker::park(bool isAbsolute, jlong time) { 4640 // Optional fast-path check: 4641 // Return immediately if a permit is available. 4642 if (_counter > 0) { 4643 _counter = 0; 4644 OrderAccess::fence(); 4645 return; 4646 } 4647 4648 Thread* thread = Thread::current(); 4649 assert(thread->is_Java_thread(), "Must be JavaThread"); 4650 JavaThread *jt = (JavaThread *)thread; 4651 4652 // Optional optimization -- avoid state transitions if there's an interrupt pending. 
4653 // Check interrupt before trying to wait 4654 if (Thread::is_interrupted(thread, false)) { 4655 return; 4656 } 4657 4658 // Next, demultiplex/decode time arguments 4659 timespec absTime; 4660 if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all 4661 return; 4662 } 4663 if (time > 0) { 4664 unpackTime(&absTime, isAbsolute, time); 4665 } 4666 4667 // Enter safepoint region 4668 // Beware of deadlocks such as 6317397. 4669 // The per-thread Parker:: mutex is a classic leaf-lock. 4670 // In particular a thread must never block on the Threads_lock while 4671 // holding the Parker:: mutex. If safepoints are pending both the 4672 // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. 4673 ThreadBlockInVM tbivm(jt); 4674 4675 // Don't wait if cannot get lock since interference arises from 4676 // unblocking. Also. check interrupt before trying wait 4677 if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) { 4678 return; 4679 } 4680 4681 int status; 4682 if (_counter > 0) { // no wait needed 4683 _counter = 0; 4684 status = pthread_mutex_unlock(_mutex); 4685 assert (status == 0, "invariant"); 4686 OrderAccess::fence(); 4687 return; 4688 } 4689 4690#ifdef ASSERT 4691 // Don't catch signals while blocked; let the running threads have the signals. 4692 // (This allows a debugger to break into the running thread.) 
4693 sigset_t oldsigs; 4694 sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals(); 4695 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs); 4696#endif 4697 4698 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 4699 jt->set_suspend_equivalent(); 4700 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 4701 4702 if (time == 0) { 4703 status = pthread_cond_wait (_cond, _mutex); 4704 } else { 4705 status = pthread_cond_timedwait (_cond, _mutex, &absTime); 4706 } 4707 assert_status(status == 0 || status == EINTR || 4708 status == ETIME || status == ETIMEDOUT, 4709 status, "cond_timedwait"); 4710 4711#ifdef ASSERT 4712 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL); 4713#endif 4714 4715 _counter = 0; 4716 status = pthread_mutex_unlock(_mutex); 4717 assert_status(status == 0, status, "invariant"); 4718 // If externally suspended while waiting, re-suspend 4719 if (jt->handle_special_suspend_equivalent_condition()) { 4720 jt->java_suspend_self(); 4721 } 4722 4723 OrderAccess::fence(); 4724} 4725 4726void Parker::unpark() { 4727 int s, status; 4728 status = pthread_mutex_lock(_mutex); 4729 assert (status == 0, "invariant"); 4730 s = _counter; 4731 _counter = 1; 4732 if (s < 1) { 4733 status = pthread_mutex_unlock(_mutex); 4734 assert (status == 0, "invariant"); 4735 status = pthread_cond_signal (_cond); 4736 assert (status == 0, "invariant"); 4737 } else { 4738 pthread_mutex_unlock(_mutex); 4739 assert (status == 0, "invariant"); 4740 } 4741} 4742 4743extern char** environ; 4744 4745// Run the specified command in a separate process. Return its exit value, 4746// or -1 on failure (e.g. can't fork a new process). 4747// Unlike system(), this function can be called from signal handler. It 4748// doesn't block SIGINT et al. 
4749int os::fork_and_exec(char* cmd) { 4750 char * argv[4] = {"sh", "-c", cmd, NULL}; 4751 4752 pid_t pid = fork(); 4753 4754 if (pid < 0) { 4755 // fork failed 4756 return -1; 4757 4758 } else if (pid == 0) { 4759 // child process 4760 4761 // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX. 4762 execve("/usr/bin/sh", argv, environ); 4763 4764 // execve failed 4765 _exit(-1); 4766 4767 } else { 4768 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 4769 // care about the actual exit code, for now. 4770 4771 int status; 4772 4773 // Wait for the child process to exit. This returns immediately if 4774 // the child has already exited. */ 4775 while (waitpid(pid, &status, 0) < 0) { 4776 switch (errno) { 4777 case ECHILD: return 0; 4778 case EINTR: break; 4779 default: return -1; 4780 } 4781 } 4782 4783 if (WIFEXITED(status)) { 4784 // The child exited normally; get its exit code. 4785 return WEXITSTATUS(status); 4786 } else if (WIFSIGNALED(status)) { 4787 // The child exited because of a signal. 4788 // The best value to return is 0x80 + signal number, 4789 // because that is what all Unix shells do, and because 4790 // it allows callers to distinguish between process exit and 4791 // process death by signal. 4792 return 0x80 + WTERMSIG(status); 4793 } else { 4794 // Unknown exit code; pass it through. 4795 return status; 4796 } 4797 } 4798 return -1; 4799} 4800 4801// is_headless_jre() 4802// 4803// Test for the existence of xawt/libmawt.so or libawt_xawt.so 4804// in order to report if we are running in a headless jre. 
4805// 4806// Since JDK8 xawt/libmawt.so is moved into the same directory 4807// as libawt.so, and renamed libawt_xawt.so 4808bool os::is_headless_jre() { 4809 struct stat statbuf; 4810 char buf[MAXPATHLEN]; 4811 char libmawtpath[MAXPATHLEN]; 4812 const char *xawtstr = "/xawt/libmawt.so"; 4813 const char *new_xawtstr = "/libawt_xawt.so"; 4814 4815 char *p; 4816 4817 // Get path to libjvm.so 4818 os::jvm_path(buf, sizeof(buf)); 4819 4820 // Get rid of libjvm.so 4821 p = strrchr(buf, '/'); 4822 if (p == NULL) return false; 4823 else *p = '\0'; 4824 4825 // Get rid of client or server 4826 p = strrchr(buf, '/'); 4827 if (p == NULL) return false; 4828 else *p = '\0'; 4829 4830 // check xawt/libmawt.so 4831 strcpy(libmawtpath, buf); 4832 strcat(libmawtpath, xawtstr); 4833 if (::stat(libmawtpath, &statbuf) == 0) return false; 4834 4835 // check libawt_xawt.so 4836 strcpy(libmawtpath, buf); 4837 strcat(libmawtpath, new_xawtstr); 4838 if (::stat(libmawtpath, &statbuf) == 0) return false; 4839 4840 return true; 4841} 4842 4843// Get the default path to the core file 4844// Returns the length of the string 4845int os::get_core_path(char* buffer, size_t bufferSize) { 4846 const char* p = get_current_directory(buffer, bufferSize); 4847 4848 if (p == NULL) { 4849 assert(p != NULL, "failed to get current directory"); 4850 return 0; 4851 } 4852 4853 jio_snprintf(buffer, bufferSize, "%s/core or core.%d", 4854 p, current_process_id()); 4855 4856 return strlen(buffer); 4857} 4858 4859#ifndef PRODUCT 4860void TestReserveMemorySpecial_test() { 4861 // No tests available for this platform 4862} 4863#endif 4864 4865bool os::start_debugging(char *buf, int buflen) { 4866 int len = (int)strlen(buf); 4867 char *p = &buf[len]; 4868 4869 jio_snprintf(p, buflen -len, 4870 "\n\n" 4871 "Do you want to debug the problem?\n\n" 4872 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n" 4873 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n" 
4874 "Otherwise, press RETURN to abort...", 4875 os::current_process_id(), 4876 os::current_thread_id(), thread_self()); 4877 4878 bool yes = os::message_box("Unexpected Error", buf); 4879 4880 if (yes) { 4881 // yes, user asked VM to launch debugger 4882 jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id()); 4883 4884 os::fork_and_exec(buf); 4885 yes = false; 4886 } 4887 return yes; 4888} 4889