os_solaris.cpp revision 9314:d3870bf39fae
1/* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25// no precompiled headers 26#include "classfile/classLoader.hpp" 27#include "classfile/systemDictionary.hpp" 28#include "classfile/vmSymbols.hpp" 29#include "code/icBuffer.hpp" 30#include "code/vtableStubs.hpp" 31#include "compiler/compileBroker.hpp" 32#include "compiler/disassembler.hpp" 33#include "interpreter/interpreter.hpp" 34#include "jvm_solaris.h" 35#include "memory/allocation.inline.hpp" 36#include "memory/filemap.hpp" 37#include "mutex_solaris.inline.hpp" 38#include "oops/oop.inline.hpp" 39#include "os_share_solaris.hpp" 40#include "os_solaris.inline.hpp" 41#include "prims/jniFastGetField.hpp" 42#include "prims/jvm.h" 43#include "prims/jvm_misc.hpp" 44#include "runtime/arguments.hpp" 45#include "runtime/atomic.inline.hpp" 46#include "runtime/extendedPC.hpp" 47#include "runtime/globals.hpp" 48#include "runtime/interfaceSupport.hpp" 49#include "runtime/java.hpp" 50#include "runtime/javaCalls.hpp" 51#include "runtime/mutexLocker.hpp" 52#include "runtime/objectMonitor.hpp" 53#include "runtime/orderAccess.inline.hpp" 54#include "runtime/osThread.hpp" 55#include "runtime/perfMemory.hpp" 56#include "runtime/sharedRuntime.hpp" 57#include "runtime/statSampler.hpp" 58#include "runtime/stubRoutines.hpp" 59#include "runtime/thread.inline.hpp" 60#include "runtime/threadCritical.hpp" 61#include "runtime/timer.hpp" 62#include "runtime/vm_version.hpp" 63#include "semaphore_posix.hpp" 64#include "services/attachListener.hpp" 65#include "services/memTracker.hpp" 66#include "services/runtimeService.hpp" 67#include "utilities/decoder.hpp" 68#include "utilities/defaultStream.hpp" 69#include "utilities/events.hpp" 70#include "utilities/growableArray.hpp" 71#include "utilities/vmError.hpp" 72 73// put OS-includes here 74# include <dlfcn.h> 75# include <errno.h> 76# include <exception> 77# include <link.h> 78# include <poll.h> 79# include <pthread.h> 80# include <pwd.h> 81# include <schedctl.h> 82# include <setjmp.h> 83# include <signal.h> 84# include <stdio.h> 
85# include <alloca.h> 86# include <sys/filio.h> 87# include <sys/ipc.h> 88# include <sys/lwp.h> 89# include <sys/machelf.h> // for elf Sym structure used by dladdr1 90# include <sys/mman.h> 91# include <sys/processor.h> 92# include <sys/procset.h> 93# include <sys/pset.h> 94# include <sys/resource.h> 95# include <sys/shm.h> 96# include <sys/socket.h> 97# include <sys/stat.h> 98# include <sys/systeminfo.h> 99# include <sys/time.h> 100# include <sys/times.h> 101# include <sys/types.h> 102# include <sys/wait.h> 103# include <sys/utsname.h> 104# include <thread.h> 105# include <unistd.h> 106# include <sys/priocntl.h> 107# include <sys/rtpriocntl.h> 108# include <sys/tspriocntl.h> 109# include <sys/iapriocntl.h> 110# include <sys/fxpriocntl.h> 111# include <sys/loadavg.h> 112# include <string.h> 113# include <stdio.h> 114 115# define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 116# include <sys/procfs.h> // see comment in <sys/procfs.h> 117 118#define MAX_PATH (2 * K) 119 120// for timer info max values which include all bits 121#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 122 123 124// Here are some liblgrp types from sys/lgrp_user.h to be able to 125// compile on older systems without this header file. 
// Fallback definitions of liblgrp madvise()/lgroup constants so this file
// still compiles on older Solaris releases whose headers lack them.
#ifndef MADV_ACCESS_LWP
  #define MADV_ACCESS_LWP 7       /* next LWP to access heavily */
#endif
#ifndef MADV_ACCESS_MANY
  #define MADV_ACCESS_MANY 8      /* many processes to access heavily */
#endif

#ifndef LGRP_RSRC_CPU
  #define LGRP_RSRC_CPU 0       /* CPU resources */
#endif
#ifndef LGRP_RSRC_MEM
  #define LGRP_RSRC_MEM 1       /* memory resources */
#endif

// Values for ThreadPriorityPolicy == 1
// Maps Java thread priorities (index) to Solaris native priorities; the
// leading -99999 is a sentinel for the unused zeroth slot.
int prio_policy1[CriticalPriority+1] = {
  -99999,  0, 16,  32,  48,  64,
          80, 96, 112, 124, 127, 127 };

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
static bool enabled_extended_FILE_stdio = false;

// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround

// "default" initializers for missing libc APIs
// Used when the real lwp-level primitives cannot be resolved from libc:
// init zero-fills the object, destroy is a no-op.
extern "C" {
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx) { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg) { memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv) { return 0; }
}

// "default" initializers for pthread-based synchronization
// Same zero-fill convention as the lwp stubs above.
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg) { memset(cv, 0, sizeof(cond_t)); return 0; }
}

// Forward declaration; converts a jlong timeout (absolute or relative)
// into a timespec. Defined later in this file.
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
179 180static inline size_t adjust_stack_size(address base, size_t size) { 181 if ((ssize_t)size < 0) { 182 // 4759953: Compensate for ridiculous stack size. 183 size = max_intx; 184 } 185 if (size > (size_t)base) { 186 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 187 size = (size_t)base; 188 } 189 return size; 190} 191 192static inline stack_t get_stack_info() { 193 stack_t st; 194 int retval = thr_stksegment(&st); 195 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 196 assert(retval == 0, "incorrect return value from thr_stksegment"); 197 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 198 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 199 return st; 200} 201 202address os::current_stack_base() { 203 int r = thr_main(); 204 guarantee(r == 0 || r == 1, "CR6501650 or CR6493689"); 205 bool is_primordial_thread = r; 206 207 // Workaround 4352906, avoid calls to thr_stksegment by 208 // thr_main after the first one (it looks like we trash 209 // some data, causing the value for ss_sp to be incorrect). 
210 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 211 stack_t st = get_stack_info(); 212 if (is_primordial_thread) { 213 // cache initial value of stack base 214 os::Solaris::_main_stack_base = (address)st.ss_sp; 215 } 216 return (address)st.ss_sp; 217 } else { 218 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 219 return os::Solaris::_main_stack_base; 220 } 221} 222 223size_t os::current_stack_size() { 224 size_t size; 225 226 int r = thr_main(); 227 guarantee(r == 0 || r == 1, "CR6501650 or CR6493689"); 228 if (!r) { 229 size = get_stack_info().ss_size; 230 } else { 231 struct rlimit limits; 232 getrlimit(RLIMIT_STACK, &limits); 233 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 234 } 235 // base may not be page aligned 236 address base = current_stack_base(); 237 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 238 return (size_t)(base - bottom); 239} 240 241struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 242 return localtime_r(clock, res); 243} 244 245void os::Solaris::try_enable_extended_io() { 246 typedef int (*enable_extended_FILE_stdio_t)(int, int); 247 248 if (!UseExtendedFileIO) { 249 return; 250 } 251 252 enable_extended_FILE_stdio_t enabler = 253 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 254 "enable_extended_FILE_stdio"); 255 if (enabler) { 256 enabler(-1, -1); 257 } 258} 259 260static int _processors_online = 0; 261 262jint os::Solaris::_os_thread_limit = 0; 263volatile jint os::Solaris::_os_thread_count = 0; 264 265julong os::available_memory() { 266 return Solaris::available_memory(); 267} 268 269julong os::Solaris::available_memory() { 270 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 271} 272 273julong os::Solaris::_physical_memory = 0; 274 275julong os::physical_memory() { 276 return Solaris::physical_memory(); 277} 278 279static hrtime_t first_hrtime = 0; 
// gethrtime() tick rate: 10^9 ticks (nanoseconds) per second.
static const hrtime_t hrtime_hz = 1000*1000*1000;
// High-water mark of observed hrtime values (updated with CAS elsewhere --
// NOTE(review): updater not visible in this chunk; confirm in time code).
static volatile hrtime_t max_hrtime = 0;


// Cache processor counts and physical memory size at VM startup.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
                                     (julong)sysconf(_SC_PAGESIZE);
}

// Number of processors the VM may actually run on: the size of our
// processor set if we are bound to one, otherwise the online CPU count.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}

// Fill *id_array (C-heap allocated; caller frees) with the ids of the
// processors in 'pset', and *id_length with their count.
// Returns false if pset_info() fails; *id_array may still have been
// allocated in that case, so the caller must free it regardless.
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}

// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids. Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux. Instead, we just return what
    // we've got. Note that in the worst case find_processors_online() could
    // return an empty set. (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found;
  }

  return true;
}

// Pick 'distribution_length' distinct processor ids out of id_array,
// shuffled across "boards" of ProcessDistributionStride processors so work
// spreads over physical boards. Returns false if there are not enough ids.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  // Indexed by processor id; true means the id is online and unassigned.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method. What we'd like is
  //                It would be nice to have an API that let us ask
  //                how many processes are bound to a processor,
  //                but we don't have that, either.
  //                In the short term, "board" is static so that
  //                subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  // Termination: id_length >= distribution_length guarantees enough
  // available ids exist, and board wraps to 0 when it passes limit_id.
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id);
  }
  return true;
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

// Fill 'distribution' with 'length' processor ids drawn from the current
// processor set (if bound to one) or from all online processors.
// Results are best-effort: the configuration can change concurrently.
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array);
  }
  return result;
}

// Bind the current LWP to the given processor; returns true on success.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  int bind_result =
    processor_bind(P_LWPID,                       // bind LWP.
                   P_MYID,                        // bind current LWP.
                   (processorid_t) processor_id,  // id.
                   NULL);                         // don't return old binding.
  return (bind_result == 0);
}

// Return true if user is running as root.
486 487bool os::have_special_privileges() { 488 static bool init = false; 489 static bool privileges = false; 490 if (!init) { 491 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 492 init = true; 493 } 494 return privileges; 495} 496 497 498void os::init_system_properties_values() { 499 // The next steps are taken in the product version: 500 // 501 // Obtain the JAVA_HOME value from the location of libjvm.so. 502 // This library should be located at: 503 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so. 504 // 505 // If "/jre/lib/" appears at the right place in the path, then we 506 // assume libjvm.so is installed in a JDK and we use this path. 507 // 508 // Otherwise exit with message: "Could not create the Java virtual machine." 509 // 510 // The following extra steps are taken in the debugging version: 511 // 512 // If "/jre/lib/" does NOT appear at the right place in the path 513 // instead of exit check for $JAVA_HOME environment variable. 514 // 515 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, 516 // then we append a fake suffix "hotspot/libjvm.so" to this path so 517 // it looks like libjvm.so is installed there 518 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so. 519 // 520 // Otherwise exit. 521 // 522 // Important note: if the location of libjvm.so changes this 523 // code needs to be changed accordingly. 524 525// Base path of extensions installed on the system. 526#define SYS_EXT_DIR "/usr/jdk/packages" 527#define EXTENSIONS_DIR "/lib/ext" 528 529 char cpu_arch[12]; 530 // Buffer that fits several sprintfs. 531 // Note that the space for the colon and the trailing null are provided 532 // by the nulls included by the sizeof operator. 533 const size_t bufsize = 534 MAX3((size_t)MAXPATHLEN, // For dll_dir & friends. 
535 sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path 536 (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir 537 char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); 538 539 // sysclasspath, java_home, dll_dir 540 { 541 char *pslash; 542 os::jvm_path(buf, bufsize); 543 544 // Found the full path to libjvm.so. 545 // Now cut the path to <java_home>/jre if we can. 546 *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so. 547 pslash = strrchr(buf, '/'); 548 if (pslash != NULL) { 549 *pslash = '\0'; // Get rid of /{client|server|hotspot}. 550 } 551 Arguments::set_dll_dir(buf); 552 553 if (pslash != NULL) { 554 pslash = strrchr(buf, '/'); 555 if (pslash != NULL) { 556 *pslash = '\0'; // Get rid of /<arch>. 557 pslash = strrchr(buf, '/'); 558 if (pslash != NULL) { 559 *pslash = '\0'; // Get rid of /lib. 560 } 561 } 562 } 563 Arguments::set_java_home(buf); 564 set_boot_path('/', ':'); 565 } 566 567 // Where to look for native libraries. 568 { 569 // Use dlinfo() to determine the correct java.library.path. 570 // 571 // If we're launched by the Java launcher, and the user 572 // does not set java.library.path explicitly on the commandline, 573 // the Java launcher sets LD_LIBRARY_PATH for us and unsets 574 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case 575 // dlinfo returns LD_LIBRARY_PATH + crle settings (including 576 // /usr/lib), which is exactly what we want. 577 // 578 // If the user does set java.library.path, it completely 579 // overwrites this setting, and always has. 580 // 581 // If we're not launched by the Java launcher, we may 582 // get here with any/all of the LD_LIBRARY_PATH[_32|64] 583 // settings. Again, dlinfo does exactly what we want. 584 585 Dl_serinfo info_sz, *info = &info_sz; 586 Dl_serpath *path; 587 char *library_path; 588 char *common_path = buf; 589 590 // Determine search path count and required buffer size. 
591 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) { 592 FREE_C_HEAP_ARRAY(char, buf); 593 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror()); 594 } 595 596 // Allocate new buffer and initialize. 597 info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal); 598 info->dls_size = info_sz.dls_size; 599 info->dls_cnt = info_sz.dls_cnt; 600 601 // Obtain search path information. 602 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) { 603 FREE_C_HEAP_ARRAY(char, buf); 604 FREE_C_HEAP_ARRAY(char, info); 605 vm_exit_during_initialization("dlinfo SERINFO request", dlerror()); 606 } 607 608 path = &info->dls_serpath[0]; 609 610 // Note: Due to a legacy implementation, most of the library path 611 // is set in the launcher. This was to accomodate linking restrictions 612 // on legacy Solaris implementations (which are no longer supported). 613 // Eventually, all the library path setting will be done here. 614 // 615 // However, to prevent the proliferation of improperly built native 616 // libraries, the new path component /usr/jdk/packages is added here. 617 618 // Determine the actual CPU architecture. 619 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 620#ifdef _LP64 621 // If we are a 64-bit vm, perform the following translations: 622 // sparc -> sparcv9 623 // i386 -> amd64 624 if (strcmp(cpu_arch, "sparc") == 0) { 625 strcat(cpu_arch, "v9"); 626 } else if (strcmp(cpu_arch, "i386") == 0) { 627 strcpy(cpu_arch, "amd64"); 628 } 629#endif 630 631 // Construct the invariant part of ld_library_path. 632 sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch); 633 634 // Struct size is more than sufficient for the path components obtained 635 // through the dlinfo() call, so only add additional space for the path 636 // components explicitly added here. 
637 size_t library_path_size = info->dls_size + strlen(common_path); 638 library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal); 639 library_path[0] = '\0'; 640 641 // Construct the desired Java library path from the linker's library 642 // search path. 643 // 644 // For compatibility, it is optimal that we insert the additional path 645 // components specific to the Java VM after those components specified 646 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so 647 // infrastructure. 648 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it. 649 strcpy(library_path, common_path); 650 } else { 651 int inserted = 0; 652 int i; 653 for (i = 0; i < info->dls_cnt; i++, path++) { 654 uint_t flags = path->dls_flags & LA_SER_MASK; 655 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) { 656 strcat(library_path, common_path); 657 strcat(library_path, os::path_separator()); 658 inserted = 1; 659 } 660 strcat(library_path, path->dls_name); 661 strcat(library_path, os::path_separator()); 662 } 663 // Eliminate trailing path separator. 664 library_path[strlen(library_path)-1] = '\0'; 665 } 666 667 // happens before argument parsing - can't use a trace flag 668 // tty->print_raw("init_system_properties_values: native lib path: "); 669 // tty->print_raw_cr(library_path); 670 671 // Callee copies into its own buffer. 672 Arguments::set_library_path(library_path); 673 674 FREE_C_HEAP_ARRAY(char, library_path); 675 FREE_C_HEAP_ARRAY(char, info); 676 } 677 678 // Extensions directories. 
679 sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home()); 680 Arguments::set_ext_dirs(buf); 681 682 FREE_C_HEAP_ARRAY(char, buf); 683 684#undef SYS_EXT_DIR 685#undef EXTENSIONS_DIR 686} 687 688void os::breakpoint() { 689 BREAKPOINT; 690} 691 692bool os::obsolete_option(const JavaVMOption *option) { 693 if (!strncmp(option->optionString, "-Xt", 3)) { 694 return true; 695 } else if (!strncmp(option->optionString, "-Xtm", 4)) { 696 return true; 697 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) { 698 return true; 699 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) { 700 return true; 701 } 702 return false; 703} 704 705bool os::Solaris::valid_stack_address(Thread* thread, address sp) { 706 address stackStart = (address)thread->stack_base(); 707 address stackEnd = (address)(stackStart - (address)thread->stack_size()); 708 if (sp < stackStart && sp >= stackEnd) return true; 709 return false; 710} 711 712extern "C" void breakpoint() { 713 // use debugger to set breakpoint here 714} 715 716static thread_t main_thread; 717 718// Thread start routine for all new Java threads 719extern "C" void* java_start(void* thread_addr) { 720 // Try to randomize the cache line index of hot stack frames. 721 // This helps when threads of the same stack traces evict each other's 722 // cache lines. The threads can be either from the same JVM instance, or 723 // from different JVM instances. The benefit is especially true for 724 // processors with hyperthreading technology. 
725 static int counter = 0; 726 int pid = os::current_process_id(); 727 alloca(((pid ^ counter++) & 7) * 128); 728 729 int prio; 730 Thread* thread = (Thread*)thread_addr; 731 OSThread* osthr = thread->osthread(); 732 733 osthr->set_lwp_id(_lwp_self()); // Store lwp in case we are bound 734 thread->_schedctl = (void *) schedctl_init(); 735 736 if (UseNUMA) { 737 int lgrp_id = os::numa_get_group_id(); 738 if (lgrp_id != -1) { 739 thread->set_lgrp_id(lgrp_id); 740 } 741 } 742 743 // If the creator called set priority before we started, 744 // we need to call set_native_priority now that we have an lwp. 745 // We used to get the priority from thr_getprio (we called 746 // thr_setprio way back in create_thread) and pass it to 747 // set_native_priority, but Solaris scales the priority 748 // in java_to_os_priority, so when we read it back here, 749 // we pass trash to set_native_priority instead of what's 750 // in java_to_os_priority. So we save the native priority 751 // in the osThread and recall it here. 
752 753 if (osthr->thread_id() != -1) { 754 if (UseThreadPriorities) { 755 int prio = osthr->native_priority(); 756 if (ThreadPriorityVerbose) { 757 tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " 758 INTPTR_FORMAT ", setting priority: %d\n", 759 osthr->thread_id(), osthr->lwp_id(), prio); 760 } 761 os::set_native_priority(thread, prio); 762 } 763 } else if (ThreadPriorityVerbose) { 764 warning("Can't set priority in _start routine, thread id hasn't been set\n"); 765 } 766 767 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 768 769 // initialize signal mask for this thread 770 os::Solaris::hotspot_sigmask(thread); 771 772 thread->run(); 773 774 // One less thread is executing 775 // When the VMThread gets here, the main thread may have already exited 776 // which frees the CodeHeap containing the Atomic::dec code 777 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 778 Atomic::dec(&os::Solaris::_os_thread_count); 779 } 780 781 if (UseDetachedThreads) { 782 thr_exit(NULL); 783 ShouldNotReachHere(); 784 } 785 return NULL; 786} 787 788static OSThread* create_os_thread(Thread* thread, thread_t thread_id) { 789 // Allocate the OSThread object 790 OSThread* osthread = new OSThread(NULL, NULL); 791 if (osthread == NULL) return NULL; 792 793 // Store info on the Solaris thread into the OSThread 794 osthread->set_thread_id(thread_id); 795 osthread->set_lwp_id(_lwp_self()); 796 thread->_schedctl = (void *) schedctl_init(); 797 798 if (UseNUMA) { 799 int lgrp_id = os::numa_get_group_id(); 800 if (lgrp_id != -1) { 801 thread->set_lgrp_id(lgrp_id); 802 } 803 } 804 805 if (ThreadPriorityVerbose) { 806 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n", 807 osthread->thread_id(), osthread->lwp_id()); 808 } 809 810 // Initial thread state is INITIALIZED, not SUSPENDED 811 osthread->set_state(INITIALIZED); 812 813 return osthread; 814} 815 816void os::Solaris::hotspot_sigmask(Thread* thread) 
{
  // Save the caller's signal mask so it can be restored later in
  // os::free_thread(), then install the mask HotSpot expects.
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Wrap an already-running native thread (attached via JNI) in an OSThread
// and set up its HotSpot signal state. Returns false only if the OSThread
// could not be created.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Attach the primordial (launcher) thread. _starting_thread is created at
// most once; a repeated call reuses the existing OSThread.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}


// Create a new native thread for 'thread'. The thread is created suspended
// (THR_SUSPENDED) and left in state INITIALIZED; it is resumed later via
// os::pd_start_thread(). Returns false on allocation or thr_create failure.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if (ThreadPriorityVerbose) {
    char *thrtyp;
    switch (thr_type) {
    case vm_thread:
      thrtyp = (char *)"vm";
      break;
    case cgc_thread:
      thrtyp = (char *)"cgc";
      break;
    case pgc_thread:
      thrtyp = (char *)"pgc";
      break;
    case java_thread:
      thrtyp = (char *)"java";
      break;
    case compiler_thread:
      thrtyp = (char *)"compiler";
      break;
    case watcher_thread:
      thrtyp = (char *)"watcher";
      break;
    default:
      thrtyp = (char *)"unknown";
      break;
    }
    // NOTE(review): print_cr already appends a newline, so the trailing
    // "\n" here produces an extra blank line.
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    // (BytesPerWord >> 2) is 1 on 32-bit and 2 on 64-bit.
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
  int status;

  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

// defined for >= Solaris 10. This allows builds on earlier versions
// of Solaris to take advantage of the newly reserved Solaris JVM signals
// With SIGJVM1, SIGJVM2, ASYNC_SIGNAL is SIGJVM2 and -XX:+UseAltSigs does
// nothing since these should have no conflict. Previously INTERRUPT_SIGNAL
// was SIGJVM1.
1004// 1005#if !defined(SIGJVM1) 1006 #define SIGJVM1 39 1007 #define SIGJVM2 40 1008#endif 1009 1010debug_only(static bool signal_sets_initialized = false); 1011static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1012 1013int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1014 1015bool os::Solaris::is_sig_ignored(int sig) { 1016 struct sigaction oact; 1017 sigaction(sig, (struct sigaction*)NULL, &oact); 1018 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1019 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1020 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) { 1021 return true; 1022 } else { 1023 return false; 1024 } 1025} 1026 1027// Note: SIGRTMIN is a macro that calls sysconf() so it will 1028// dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1029static bool isJVM1available() { 1030 return SIGJVM1 < SIGRTMIN; 1031} 1032 1033void os::Solaris::signal_sets_init() { 1034 // Should also have an assertion stating we are still single-threaded. 1035 assert(!signal_sets_initialized, "Already initialized"); 1036 // Fill in signals that are necessarily unblocked for all threads in 1037 // the VM. Currently, we unblock the following signals: 1038 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1039 // by -Xrs (=ReduceSignalUsage)); 1040 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1041 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1042 // the dispositions or masks wrt these signals. 1043 // Programs embedding the VM that want to use the above signals for their 1044 // own purposes must, at this time, use the "-Xrs" option to prevent 1045 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1046 // (See bug 4345157, and other related bugs). 1047 // In reality, though, unblocking these signals is really a nop, since 1048 // these signals are not blocked by default. 
1049 sigemptyset(&unblocked_sigs); 1050 sigemptyset(&allowdebug_blocked_sigs); 1051 sigaddset(&unblocked_sigs, SIGILL); 1052 sigaddset(&unblocked_sigs, SIGSEGV); 1053 sigaddset(&unblocked_sigs, SIGBUS); 1054 sigaddset(&unblocked_sigs, SIGFPE); 1055 1056 if (isJVM1available) { 1057 os::Solaris::set_SIGasync(SIGJVM2); 1058 } else if (UseAltSigs) { 1059 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1060 } else { 1061 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1062 } 1063 1064 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1065 1066 if (!ReduceSignalUsage) { 1067 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1068 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1069 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1070 } 1071 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1072 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1073 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1074 } 1075 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1076 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1077 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1078 } 1079 } 1080 // Fill in signals that are blocked by all but the VM thread. 1081 sigemptyset(&vm_sigs); 1082 if (!ReduceSignalUsage) { 1083 sigaddset(&vm_sigs, BREAK_SIGNAL); 1084 } 1085 debug_only(signal_sets_initialized = true); 1086 1087 // For diagnostics only used in run_periodic_checks 1088 sigemptyset(&check_signal_done); 1089} 1090 1091// These are signals that are unblocked while a thread is running Java. 1092// (For some reason, they get blocked by default.) 1093sigset_t* os::Solaris::unblocked_signals() { 1094 assert(signal_sets_initialized, "Not initialized"); 1095 return &unblocked_sigs; 1096} 1097 1098// These are the signals that are blocked while a (non-VM) thread is 1099// running Java. Only the VM thread handles these signals. 
1100sigset_t* os::Solaris::vm_signals() { 1101 assert(signal_sets_initialized, "Not initialized"); 1102 return &vm_sigs; 1103} 1104 1105// These are signals that are blocked during cond_wait to allow debugger in 1106sigset_t* os::Solaris::allowdebug_blocked_signals() { 1107 assert(signal_sets_initialized, "Not initialized"); 1108 return &allowdebug_blocked_sigs; 1109} 1110 1111 1112void _handle_uncaught_cxx_exception() { 1113 VMError::report_and_die("An uncaught C++ exception"); 1114} 1115 1116 1117// First crack at OS-specific initialization, from inside the new thread. 1118void os::initialize_thread(Thread* thr) { 1119 int r = thr_main(); 1120 guarantee(r == 0 || r == 1, "CR6501650 or CR6493689"); 1121 if (r) { 1122 JavaThread* jt = (JavaThread *)thr; 1123 assert(jt != NULL, "Sanity check"); 1124 size_t stack_size; 1125 address base = jt->stack_base(); 1126 if (Arguments::created_by_java_launcher()) { 1127 // Use 2MB to allow for Solaris 7 64 bit mode. 1128 stack_size = JavaThread::stack_size_at_create() == 0 1129 ? 2048*K : JavaThread::stack_size_at_create(); 1130 1131 // There are rare cases when we may have already used more than 1132 // the basic stack size allotment before this method is invoked. 1133 // Attempt to allow for a normally sized java_stack. 1134 size_t current_stack_offset = (size_t)(base - (address)&stack_size); 1135 stack_size += ReservedSpace::page_align_size_down(current_stack_offset); 1136 } else { 1137 // 6269555: If we were not created by a Java launcher, i.e. if we are 1138 // running embedded in a native application, treat the primordial thread 1139 // as much like a native attached thread as possible. This means using 1140 // the current stack size from thr_stksegment(), unless it is too large 1141 // to reliably setup guard pages. A reasonable max size is 8MB. 1142 size_t current_size = current_stack_size(); 1143 // This should never happen, but just in case.... 
1144 if (current_size == 0) current_size = 2 * K * K; 1145 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size; 1146 } 1147 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());; 1148 stack_size = (size_t)(base - bottom); 1149 1150 assert(stack_size > 0, "Stack size calculation problem"); 1151 1152 if (stack_size > jt->stack_size()) { 1153#ifndef PRODUCT 1154 struct rlimit limits; 1155 getrlimit(RLIMIT_STACK, &limits); 1156 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur); 1157 assert(size >= jt->stack_size(), "Stack size problem in main thread"); 1158#endif 1159 tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n" 1160 "(Stack sizes are rounded up to a multiple of the system page size.)\n" 1161 "See limit(1) to increase the stack size limit.", 1162 stack_size / K, jt->stack_size() / K); 1163 vm_exit(1); 1164 } 1165 assert(jt->stack_size() >= stack_size, 1166 "Attempt to map more stack than was allocated"); 1167 jt->set_stack_size(stack_size); 1168 } 1169 1170 // With the T2 libthread (T1 is no longer supported) threads are always bound 1171 // and we use stackbanging in all cases. 1172 1173 os::Solaris::init_thread_fpu_state(); 1174 std::set_terminate(_handle_uncaught_cxx_exception); 1175} 1176 1177 1178 1179// Free Solaris resources related to the OSThread 1180void os::free_thread(OSThread* osthread) { 1181 assert(osthread != NULL, "os::free_thread but osthread not set"); 1182 1183 1184 // We are told to free resources of the argument thread, 1185 // but we can only really operate on the current thread. 
1186 // The main thread must take the VMThread down synchronously 1187 // before the main thread exits and frees up CodeHeap 1188 guarantee((Thread::current()->osthread() == osthread 1189 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread"); 1190 if (Thread::current()->osthread() == osthread) { 1191 // Restore caller's signal mask 1192 sigset_t sigmask = osthread->caller_sigmask(); 1193 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL); 1194 } 1195 delete osthread; 1196} 1197 1198void os::pd_start_thread(Thread* thread) { 1199 int status = thr_continue(thread->osthread()->thread_id()); 1200 assert_status(status == 0, status, "thr_continue failed"); 1201} 1202 1203 1204intx os::current_thread_id() { 1205 return (intx)thr_self(); 1206} 1207 1208static pid_t _initial_pid = 0; 1209 1210int os::current_process_id() { 1211 return (int)(_initial_pid ? _initial_pid : getpid()); 1212} 1213 1214// gethrtime() should be monotonic according to the documentation, 1215// but some virtualized platforms are known to break this guarantee. 1216// getTimeNanos() must be guaranteed not to move backwards, so we 1217// are forced to add a check here. 1218inline hrtime_t getTimeNanos() { 1219 const hrtime_t now = gethrtime(); 1220 const hrtime_t prev = max_hrtime; 1221 if (now <= prev) { 1222 return prev; // same or retrograde time; 1223 } 1224 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); 1225 assert(obsv >= prev, "invariant"); // Monotonicity 1226 // If the CAS succeeded then we're done and return "now". 1227 // If the CAS failed and the observed value "obsv" is >= now then 1228 // we should return "obsv". If the CAS failed and now > obsv > prv then 1229 // some other thread raced this thread and installed a new value, in which case 1230 // we could either (a) retry the entire operation, (b) retry trying to install now 1231 // or (c) just return obsv. We use (c). 
No loop is required although in some cases 1232 // we might discard a higher "now" value in deference to a slightly lower but freshly 1233 // installed obsv value. That's entirely benign -- it admits no new orderings compared 1234 // to (a) or (b) -- and greatly reduces coherence traffic. 1235 // We might also condition (c) on the magnitude of the delta between obsv and now. 1236 // Avoiding excessive CAS operations to hot RW locations is critical. 1237 // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate 1238 return (prev == obsv) ? now : obsv; 1239} 1240 1241// Time since start-up in seconds to a fine granularity. 1242// Used by VMSelfDestructTimer and the MemProfiler. 1243double os::elapsedTime() { 1244 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz; 1245} 1246 1247jlong os::elapsed_counter() { 1248 return (jlong)(getTimeNanos() - first_hrtime); 1249} 1250 1251jlong os::elapsed_frequency() { 1252 return hrtime_hz; 1253} 1254 1255// Return the real, user, and system times in seconds from an 1256// arbitrary fixed point in the past. 1257bool os::getTimesSecs(double* process_real_time, 1258 double* process_user_time, 1259 double* process_system_time) { 1260 struct tms ticks; 1261 clock_t real_ticks = times(&ticks); 1262 1263 if (real_ticks == (clock_t) (-1)) { 1264 return false; 1265 } else { 1266 double ticks_per_second = (double) clock_tics_per_sec; 1267 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; 1268 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; 1269 // For consistency return the real time from getTimeNanos() 1270 // converted to seconds. 
1271 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS); 1272 1273 return true; 1274 } 1275} 1276 1277bool os::supports_vtime() { return true; } 1278 1279bool os::enable_vtime() { 1280 int fd = ::open("/proc/self/ctl", O_WRONLY); 1281 if (fd == -1) { 1282 return false; 1283 } 1284 1285 long cmd[] = { PCSET, PR_MSACCT }; 1286 int res = ::write(fd, cmd, sizeof(long) * 2); 1287 ::close(fd); 1288 if (res != sizeof(long) * 2) { 1289 return false; 1290 } 1291 return true; 1292} 1293 1294bool os::vtime_enabled() { 1295 int fd = ::open("/proc/self/status", O_RDONLY); 1296 if (fd == -1) { 1297 return false; 1298 } 1299 1300 pstatus_t status; 1301 int res = os::read(fd, (void*) &status, sizeof(pstatus_t)); 1302 ::close(fd); 1303 if (res != sizeof(pstatus_t)) { 1304 return false; 1305 } 1306 return status.pr_flags & PR_MSACCT; 1307} 1308 1309double os::elapsedVTime() { 1310 return (double)gethrvtime() / (double)hrtime_hz; 1311} 1312 1313// Used internally for comparisons only 1314// getTimeMillis guaranteed to not move backwards on Solaris 1315jlong getTimeMillis() { 1316 jlong nanotime = getTimeNanos(); 1317 return (jlong)(nanotime / NANOSECS_PER_MILLISEC); 1318} 1319 1320// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis 1321jlong os::javaTimeMillis() { 1322 timeval t; 1323 if (gettimeofday(&t, NULL) == -1) { 1324 fatal("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)); 1325 } 1326 return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000; 1327} 1328 1329void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 1330 timeval t; 1331 if (gettimeofday(&t, NULL) == -1) { 1332 fatal("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno)); 1333 } 1334 seconds = jlong(t.tv_sec); 1335 nanos = jlong(t.tv_usec) * 1000; 1336} 1337 1338 1339jlong os::javaTimeNanos() { 1340 return (jlong)getTimeNanos(); 1341} 1342 1343void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 1344 info_ptr->max_value = ALL_64_BITS; // gethrtime() uses all 64 
bits 1345 info_ptr->may_skip_backward = false; // not subject to resetting or drifting 1346 info_ptr->may_skip_forward = false; // not subject to resetting or drifting 1347 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 1348} 1349 1350char * os::local_time_string(char *buf, size_t buflen) { 1351 struct tm t; 1352 time_t long_time; 1353 time(&long_time); 1354 localtime_r(&long_time, &t); 1355 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 1356 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, 1357 t.tm_hour, t.tm_min, t.tm_sec); 1358 return buf; 1359} 1360 1361// Note: os::shutdown() might be called very early during initialization, or 1362// called from signal handler. Before adding something to os::shutdown(), make 1363// sure it is async-safe and can handle partially initialized VM. 1364void os::shutdown() { 1365 1366 // allow PerfMemory to attempt cleanup of any persistent resources 1367 perfMemory_exit(); 1368 1369 // needs to remove object in file system 1370 AttachListener::abort(); 1371 1372 // flush buffered output, finish log files 1373 ostream_abort(); 1374 1375 // Check for abort hook 1376 abort_hook_t abort_hook = Arguments::abort_hook(); 1377 if (abort_hook != NULL) { 1378 abort_hook(); 1379 } 1380} 1381 1382// Note: os::abort() might be called very early during initialization, or 1383// called from signal handler. Before adding something to os::abort(), make 1384// sure it is async-safe and can handle partially initialized VM. 
1385void os::abort(bool dump_core, void* siginfo, void* context) { 1386 os::shutdown(); 1387 if (dump_core) { 1388#ifndef PRODUCT 1389 fdStream out(defaultStream::output_fd()); 1390 out.print_raw("Current thread is "); 1391 char buf[16]; 1392 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1393 out.print_raw_cr(buf); 1394 out.print_raw_cr("Dumping core ..."); 1395#endif 1396 ::abort(); // dump core (for debugging) 1397 } 1398 1399 ::exit(1); 1400} 1401 1402// Die immediately, no exit hook, no abort hook, no cleanup. 1403void os::die() { 1404 ::abort(); // dump core (for debugging) 1405} 1406 1407// DLL functions 1408 1409const char* os::dll_file_extension() { return ".so"; } 1410 1411// This must be hard coded because it's the system's temporary 1412// directory not the java application's temp directory, ala java.io.tmpdir. 1413const char* os::get_temp_directory() { return "/tmp"; } 1414 1415static bool file_exists(const char* filename) { 1416 struct stat statbuf; 1417 if (filename == NULL || strlen(filename) == 0) { 1418 return false; 1419 } 1420 return os::stat(filename, &statbuf) == 0; 1421} 1422 1423bool os::dll_build_name(char* buffer, size_t buflen, 1424 const char* pname, const char* fname) { 1425 bool retval = false; 1426 const size_t pnamelen = pname ? strlen(pname) : 0; 1427 1428 // Return error on buffer overflow. 
1429 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1430 return retval; 1431 } 1432 1433 if (pnamelen == 0) { 1434 snprintf(buffer, buflen, "lib%s.so", fname); 1435 retval = true; 1436 } else if (strchr(pname, *os::path_separator()) != NULL) { 1437 int n; 1438 char** pelements = split_path(pname, &n); 1439 if (pelements == NULL) { 1440 return false; 1441 } 1442 for (int i = 0; i < n; i++) { 1443 // really shouldn't be NULL but what the heck, check can't hurt 1444 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1445 continue; // skip the empty path values 1446 } 1447 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1448 if (file_exists(buffer)) { 1449 retval = true; 1450 break; 1451 } 1452 } 1453 // release the storage 1454 for (int i = 0; i < n; i++) { 1455 if (pelements[i] != NULL) { 1456 FREE_C_HEAP_ARRAY(char, pelements[i]); 1457 } 1458 } 1459 if (pelements != NULL) { 1460 FREE_C_HEAP_ARRAY(char*, pelements); 1461 } 1462 } else { 1463 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1464 retval = true; 1465 } 1466 return retval; 1467} 1468 1469// check if addr is inside libjvm.so 1470bool os::address_is_in_vm(address addr) { 1471 static address libjvm_base_addr; 1472 Dl_info dlinfo; 1473 1474 if (libjvm_base_addr == NULL) { 1475 if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) { 1476 libjvm_base_addr = (address)dlinfo.dli_fbase; 1477 } 1478 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1479 } 1480 1481 if (dladdr((void *)addr, &dlinfo) != 0) { 1482 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1483 } 1484 1485 return false; 1486} 1487 1488typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int); 1489static dladdr1_func_type dladdr1_func = NULL; 1490 1491bool os::dll_address_to_function_name(address addr, char *buf, 1492 int buflen, int * offset, 1493 bool demangle) { 1494 // buf is not optional, but offset is optional 1495 assert(buf != NULL, 
"sanity check"); 1496 1497 Dl_info dlinfo; 1498 1499 // dladdr1_func was initialized in os::init() 1500 if (dladdr1_func != NULL) { 1501 // yes, we have dladdr1 1502 1503 // Support for dladdr1 is checked at runtime; it may be 1504 // available even if the vm is built on a machine that does 1505 // not have dladdr1 support. Make sure there is a value for 1506 // RTLD_DL_SYMENT. 1507#ifndef RTLD_DL_SYMENT 1508 #define RTLD_DL_SYMENT 1 1509#endif 1510#ifdef _LP64 1511 Elf64_Sym * info; 1512#else 1513 Elf32_Sym * info; 1514#endif 1515 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1516 RTLD_DL_SYMENT) != 0) { 1517 // see if we have a matching symbol that covers our address 1518 if (dlinfo.dli_saddr != NULL && 1519 (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1520 if (dlinfo.dli_sname != NULL) { 1521 if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) { 1522 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1523 } 1524 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1525 return true; 1526 } 1527 } 1528 // no matching symbol so try for just file info 1529 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1530 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1531 buf, buflen, offset, dlinfo.dli_fname, demangle)) { 1532 return true; 1533 } 1534 } 1535 } 1536 buf[0] = '\0'; 1537 if (offset != NULL) *offset = -1; 1538 return false; 1539 } 1540 1541 // no, only dladdr is available 1542 if (dladdr((void *)addr, &dlinfo) != 0) { 1543 // see if we have a matching symbol 1544 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { 1545 if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) { 1546 jio_snprintf(buf, buflen, dlinfo.dli_sname); 1547 } 1548 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1549 return true; 1550 } 1551 // no matching symbol so try for just file info 1552 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1553 if 
(Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1554 buf, buflen, offset, dlinfo.dli_fname, demangle)) { 1555 return true; 1556 } 1557 } 1558 } 1559 buf[0] = '\0'; 1560 if (offset != NULL) *offset = -1; 1561 return false; 1562} 1563 1564bool os::dll_address_to_library_name(address addr, char* buf, 1565 int buflen, int* offset) { 1566 // buf is not optional, but offset is optional 1567 assert(buf != NULL, "sanity check"); 1568 1569 Dl_info dlinfo; 1570 1571 if (dladdr((void*)addr, &dlinfo) != 0) { 1572 if (dlinfo.dli_fname != NULL) { 1573 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); 1574 } 1575 if (dlinfo.dli_fbase != NULL && offset != NULL) { 1576 *offset = addr - (address)dlinfo.dli_fbase; 1577 } 1578 return true; 1579 } 1580 1581 buf[0] = '\0'; 1582 if (offset) *offset = -1; 1583 return false; 1584} 1585 1586int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1587 Dl_info dli; 1588 // Sanity check? 1589 if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 || 1590 dli.dli_fname == NULL) { 1591 return 1; 1592 } 1593 1594 void * handle = dlopen(dli.dli_fname, RTLD_LAZY); 1595 if (handle == NULL) { 1596 return 1; 1597 } 1598 1599 Link_map *map; 1600 dlinfo(handle, RTLD_DI_LINKMAP, &map); 1601 if (map == NULL) { 1602 dlclose(handle); 1603 return 1; 1604 } 1605 1606 while (map->l_prev != NULL) { 1607 map = map->l_prev; 1608 } 1609 1610 while (map != NULL) { 1611 // Iterate through all map entries and call callback with fields of interest 1612 if(callback(map->l_name, (address)map->l_addr, (address)0, param)) { 1613 dlclose(handle); 1614 return 1; 1615 } 1616 map = map->l_next; 1617 } 1618 1619 dlclose(handle); 1620 return 0; 1621} 1622 1623int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) { 1624 outputStream * out = (outputStream *) param; 1625 out->print_cr(PTR_FORMAT " \t%s", base_address, name); 1626 return 0; 1627} 1628 1629void 
os::print_dll_info(outputStream * st) { 1630 st->print_cr("Dynamic libraries:"); st->flush(); 1631 if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) { 1632 st->print_cr("Error: Cannot print dynamic libraries."); 1633 } 1634} 1635 1636// Loads .dll/.so and 1637// in case of error it checks if .dll/.so was built for the 1638// same architecture as Hotspot is running on 1639 1640void * os::dll_load(const char *filename, char *ebuf, int ebuflen) { 1641 void * result= ::dlopen(filename, RTLD_LAZY); 1642 if (result != NULL) { 1643 // Successful loading 1644 return result; 1645 } 1646 1647 Elf32_Ehdr elf_head; 1648 1649 // Read system error message into ebuf 1650 // It may or may not be overwritten below 1651 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 1652 ebuf[ebuflen-1]='\0'; 1653 int diag_msg_max_length=ebuflen-strlen(ebuf); 1654 char* diag_msg_buf=ebuf+strlen(ebuf); 1655 1656 if (diag_msg_max_length==0) { 1657 // No more space in ebuf for additional diagnostics message 1658 return NULL; 1659 } 1660 1661 1662 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 1663 1664 if (file_descriptor < 0) { 1665 // Can't open library, report dlerror() message 1666 return NULL; 1667 } 1668 1669 bool failed_to_read_elf_head= 1670 (sizeof(elf_head)!= 1671 (::read(file_descriptor, &elf_head,sizeof(elf_head)))); 1672 1673 ::close(file_descriptor); 1674 if (failed_to_read_elf_head) { 1675 // file i/o error - report dlerror() msg 1676 return NULL; 1677 } 1678 1679 typedef struct { 1680 Elf32_Half code; // Actual value as defined in elf.h 1681 Elf32_Half compat_class; // Compatibility of archs at VM's sense 1682 char elf_class; // 32 or 64 bit 1683 char endianess; // MSB or LSB 1684 char* name; // String representation 1685 } arch_t; 1686 1687 static const arch_t arch_array[]={ 1688 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 1689 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 1690 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 
64"}, 1691 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 1692 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 1693 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 1694 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 1695 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 1696 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 1697 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 1698 }; 1699 1700#if (defined IA32) 1701 static Elf32_Half running_arch_code=EM_386; 1702#elif (defined AMD64) 1703 static Elf32_Half running_arch_code=EM_X86_64; 1704#elif (defined IA64) 1705 static Elf32_Half running_arch_code=EM_IA_64; 1706#elif (defined __sparc) && (defined _LP64) 1707 static Elf32_Half running_arch_code=EM_SPARCV9; 1708#elif (defined __sparc) && (!defined _LP64) 1709 static Elf32_Half running_arch_code=EM_SPARC; 1710#elif (defined __powerpc64__) 1711 static Elf32_Half running_arch_code=EM_PPC64; 1712#elif (defined __powerpc__) 1713 static Elf32_Half running_arch_code=EM_PPC; 1714#elif (defined ARM) 1715 static Elf32_Half running_arch_code=EM_ARM; 1716#else 1717 #error Method os::dll_load requires that one of following is defined:\ 1718 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 1719#endif 1720 1721 // Identify compatability class for VM's architecture and library's architecture 1722 // Obtain string descriptions for architectures 1723 1724 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 1725 int running_arch_index=-1; 1726 1727 for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) { 1728 if (running_arch_code == arch_array[i].code) { 1729 running_arch_index = i; 1730 } 1731 if (lib_arch.code == arch_array[i].code) { 1732 lib_arch.compat_class = arch_array[i].compat_class; 1733 lib_arch.name = arch_array[i].name; 1734 } 1735 } 1736 1737 assert(running_arch_index != -1, 1738 "Didn't 
find running architecture code (running_arch_code) in arch_array"); 1739 if (running_arch_index == -1) { 1740 // Even though running architecture detection failed 1741 // we may still continue with reporting dlerror() message 1742 return NULL; 1743 } 1744 1745 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 1746 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 1747 return NULL; 1748 } 1749 1750 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 1751 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 1752 return NULL; 1753 } 1754 1755 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 1756 if (lib_arch.name!=NULL) { 1757 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1758 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 1759 lib_arch.name, arch_array[running_arch_index].name); 1760 } else { 1761 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1762 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 1763 lib_arch.code, 1764 arch_array[running_arch_index].name); 1765 } 1766 } 1767 1768 return NULL; 1769} 1770 1771void* os::dll_lookup(void* handle, const char* name) { 1772 return dlsym(handle, name); 1773} 1774 1775void* os::get_default_process_handle() { 1776 return (void*)::dlopen(NULL, RTLD_LAZY); 1777} 1778 1779int os::stat(const char *path, struct stat *sbuf) { 1780 char pathbuf[MAX_PATH]; 1781 if (strlen(path) > MAX_PATH - 1) { 1782 errno = ENAMETOOLONG; 1783 return -1; 1784 } 1785 os::native_path(strcpy(pathbuf, path)); 1786 return ::stat(pathbuf, sbuf); 1787} 1788 1789static bool _print_ascii_file(const char* filename, outputStream* st) { 1790 int fd = ::open(filename, O_RDONLY); 1791 if (fd == -1) { 1792 return false; 1793 } 1794 1795 char buf[32]; 1796 int bytes; 1797 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 1798 st->print_raw(buf, bytes); 1799 
} 1800 1801 ::close(fd); 1802 1803 return true; 1804} 1805 1806void os::print_os_info_brief(outputStream* st) { 1807 os::Solaris::print_distro_info(st); 1808 1809 os::Posix::print_uname_info(st); 1810 1811 os::Solaris::print_libversion_info(st); 1812} 1813 1814void os::print_os_info(outputStream* st) { 1815 st->print("OS:"); 1816 1817 os::Solaris::print_distro_info(st); 1818 1819 os::Posix::print_uname_info(st); 1820 1821 os::Solaris::print_libversion_info(st); 1822 1823 os::Posix::print_rlimit_info(st); 1824 1825 os::Posix::print_load_average(st); 1826} 1827 1828void os::Solaris::print_distro_info(outputStream* st) { 1829 if (!_print_ascii_file("/etc/release", st)) { 1830 st->print("Solaris"); 1831 } 1832 st->cr(); 1833} 1834 1835void os::get_summary_os_info(char* buf, size_t buflen) { 1836 strncpy(buf, "Solaris", buflen); // default to plain solaris 1837 FILE* fp = fopen("/etc/release", "r"); 1838 if (fp != NULL) { 1839 char tmp[256]; 1840 // Only get the first line and chop out everything but the os name. 1841 if (fgets(tmp, sizeof(tmp), fp)) { 1842 char* ptr = tmp; 1843 // skip past whitespace characters 1844 while (*ptr != '\0' && (*ptr == ' ' || *ptr == '\t' || *ptr == '\n')) ptr++; 1845 if (*ptr != '\0') { 1846 char* nl = strchr(ptr, '\n'); 1847 if (nl != NULL) *nl = '\0'; 1848 strncpy(buf, ptr, buflen); 1849 } 1850 } 1851 fclose(fp); 1852 } 1853} 1854 1855void os::Solaris::print_libversion_info(outputStream* st) { 1856 st->print(" (T2 libthread)"); 1857 st->cr(); 1858} 1859 1860static bool check_addr0(outputStream* st) { 1861 jboolean status = false; 1862 int fd = ::open("/proc/self/map",O_RDONLY); 1863 if (fd >= 0) { 1864 prmap_t p; 1865 while (::read(fd, &p, sizeof(p)) > 0) { 1866 if (p.pr_vaddr == 0x0) { 1867 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 1868 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 1869 st->print("Access:"); 1870 st->print("%s",(p.pr_mflags & MA_READ) ? 
"r" : "-"); 1871 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 1872 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 1873 st->cr(); 1874 status = true; 1875 } 1876 } 1877 ::close(fd); 1878 } 1879 return status; 1880} 1881 1882void os::get_summary_cpu_info(char* buf, size_t buflen) { 1883 // Get MHz with system call. We don't seem to already have this. 1884 processor_info_t stats; 1885 processorid_t id = getcpuid(); 1886 int clock = 0; 1887 if (processor_info(id, &stats) != -1) { 1888 clock = stats.pi_clock; // pi_processor_type isn't more informative than below 1889 } 1890#ifdef AMD64 1891 snprintf(buf, buflen, "x86 64 bit %d MHz", clock); 1892#else 1893 // must be sparc 1894 snprintf(buf, buflen, "Sparcv9 64 bit %d MHz", clock); 1895#endif 1896} 1897 1898void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1899 // Nothing to do for now. 1900} 1901 1902void os::print_memory_info(outputStream* st) { 1903 st->print("Memory:"); 1904 st->print(" %dk page", os::vm_page_size()>>10); 1905 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 1906 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 1907 st->cr(); 1908 (void) check_addr0(st); 1909} 1910 1911void os::print_siginfo(outputStream* st, void* siginfo) { 1912 const siginfo_t* si = (const siginfo_t*)siginfo; 1913 1914 os::Posix::print_siginfo_brief(st, si); 1915 1916 if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 1917 UseSharedSpaces) { 1918 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1919 if (mapinfo->is_in_shared_space(si->si_addr)) { 1920 st->print("\n\nError accessing class data sharing archive." \ 1921 " Mapped file inaccessible during execution, " \ 1922 " possible disk/network problem."); 1923 } 1924 } 1925 st->cr(); 1926} 1927 1928// Moved from whole group, because we need them here for diagnostic 1929// prints. 
// Signal bookkeeping used by the diagnostic printing code below.
// Maxsignum is set from SIGRTMAX in init_signal_mem(); ourSigFlags[sig]
// records the sa_flags the VM installed for each signal it registered.
#define OLDMAXSIGNUM 32
static int Maxsignum = 0;
static int *ourSigFlags = NULL;

// Return the sa_flags the VM registered for sig.
// Valid only after os::Solaris::init_signal_mem() has run.
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}

// Record the sa_flags the VM registered for sig.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}


// Format a human-readable name for a signal handler address into buf:
// "<library>+0x<offset>" when the handler can be resolved to a loaded
// library, otherwise the raw pointer value. Returns buf.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Print one line describing the currently installed handler for sig:
// handler identity, sa_mask, sa_flags, and a warning when the VM's own
// handler has had its flags changed behind its back. buf is scratch space.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  // Query (not modify) the current disposition for sig.
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // SA_SIGINFO selects which member of the handler union is active.
  address handler = (sa.sa_flags & SA_SIGINFO)
                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                  : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, signalHandler)) {
    // It is our signal handler
    // check for flags
    if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
                ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}

// Print the dispositions of every signal the VM cares about
// (crash signals, shutdown signals, and the async signal).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}

// Cache for the resolved libjvm.so path; filled lazily by os::jvm_path().
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so
// buf receives the path; buflen must be at least MAXPATHLEN or the
// function asserts and returns an empty string. The result is cached.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate libjvm.so by asking the dynamic linker which object contains
  // this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  if (ret != 0 && dlinfo.dli_fname != NULL) {
    realpath((char *)dlinfo.dli_fname, buf);
  } else {
    buf[0] = '\0';
    return;
  }

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last five '/' components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        // NOTE(review): sysinfo()'s return value is not checked; cpu_arch
        // could be left unset if the call fails — confirm acceptable.
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer space");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

// Copy the strerror() text for the current errno into buf (truncated to
// len-1 chars, NUL-terminated) and return the number of chars written;
// returns 0 when errno == 0.
// NOTE(review): len == 0 would make "n = len - 1" wrap around (size_t);
// callers appear to always pass a real buffer — confirm.
size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}


// sun.misc.Signal

extern "C" {
  // Handler installed via os::signal() for signals forwarded to Java's
  // sun.misc.Signal machinery; it only records the signal for the
  // signal-dispatcher thread to pick up.
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
      os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

// Build an absolute timespec sec seconds + nsec nanoseconds from now,
// via the shared unpackTime() helper (isAbsolute == false).
struct timespec PosixSemaphore::create_timespec(unsigned int sec, int nsec) {
  struct timespec ts;
  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);

  return ts;
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Install handler for signal_number with SA_RESTART semantics and a
// fully blocked mask during handling. Returns the previous handler, or
// (void*)-1 when sigaction() fails.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  // NOTE(review): SA_RESTART & ~SA_RESETHAND evaluates to just SA_RESTART
  // when the two flag bits are disjoint; presumably SA_RESTART alone (or
  // SA_RESTART with SA_RESETHAND explicitly cleared from an inherited
  // value) was intended — confirm.
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
2191 2192// a counter for each possible signal value 2193static int Sigexit = 0; 2194static int Maxlibjsigsigs; 2195static jint *pending_signals = NULL; 2196static int *preinstalled_sigs = NULL; 2197static struct sigaction *chainedsigactions = NULL; 2198static sema_t sig_sem; 2199typedef int (*version_getting_t)(); 2200version_getting_t os::Solaris::get_libjsig_version = NULL; 2201static int libjsigversion = NULL; 2202 2203int os::sigexitnum_pd() { 2204 assert(Sigexit > 0, "signal memory not yet initialized"); 2205 return Sigexit; 2206} 2207 2208void os::Solaris::init_signal_mem() { 2209 // Initialize signal structures 2210 Maxsignum = SIGRTMAX; 2211 Sigexit = Maxsignum+1; 2212 assert(Maxsignum >0, "Unable to obtain max signal number"); 2213 2214 Maxlibjsigsigs = Maxsignum; 2215 2216 // pending_signals has one int per signal 2217 // The additional signal is for SIGEXIT - exit signal to signal_thread 2218 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal); 2219 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2220 2221 if (UseSignalChaining) { 2222 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2223 * (Maxsignum + 1), mtInternal); 2224 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2225 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2226 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2227 } 2228 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2229 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); 2230} 2231 2232void os::signal_init_pd() { 2233 int ret; 2234 2235 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2236 assert(ret == 0, "sema_init() failed"); 2237} 2238 2239void os::signal_notify(int signal_number) { 2240 int ret; 2241 2242 Atomic::inc(&pending_signals[signal_number]); 2243 ret = ::sema_post(&sig_sem); 2244 assert(ret == 0, "sema_post() failed"); 2245} 2246 2247static int 
check_pending_signals(bool wait_for_signal) { 2248 int ret; 2249 while (true) { 2250 for (int i = 0; i < Sigexit + 1; i++) { 2251 jint n = pending_signals[i]; 2252 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2253 return i; 2254 } 2255 } 2256 if (!wait_for_signal) { 2257 return -1; 2258 } 2259 JavaThread *thread = JavaThread::current(); 2260 ThreadBlockInVM tbivm(thread); 2261 2262 bool threadIsSuspended; 2263 do { 2264 thread->set_suspend_equivalent(); 2265 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2266 while ((ret = ::sema_wait(&sig_sem)) == EINTR) 2267 ; 2268 assert(ret == 0, "sema_wait() failed"); 2269 2270 // were we externally suspended while we were waiting? 2271 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2272 if (threadIsSuspended) { 2273 // The semaphore has been incremented, but while we were waiting 2274 // another thread suspended us. We don't want to continue running 2275 // while suspended because that would surprise the thread that 2276 // suspended us. 2277 ret = ::sema_post(&sig_sem); 2278 assert(ret == 0, "sema_post() failed"); 2279 2280 thread->java_suspend_self(); 2281 } 2282 } while (threadIsSuspended); 2283 } 2284} 2285 2286int os::signal_lookup() { 2287 return check_pending_signals(false); 2288} 2289 2290int os::signal_wait() { 2291 return check_pending_signals(true); 2292} 2293 2294//////////////////////////////////////////////////////////////////////////////// 2295// Virtual Memory 2296 2297static int page_size = -1; 2298 2299// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will 2300// clear this var if support is not available. 2301static bool has_map_align = true; 2302 2303int os::vm_page_size() { 2304 assert(page_size != -1, "must call os::init"); 2305 return page_size; 2306} 2307 2308// Solaris allocates memory by pages. 
// On Solaris the allocation granularity equals the page size.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Returns true for mmap() errors the caller may handle; any other error
// means our reservation could have been lost, which is fatal.
static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

// Log a failed commit_memory() call (3-argument variant).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Log a failed commit_memory() call (variant with alignment hint).
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

// Commit [addr, addr+bytes) by remapping it read/write (plus exec if
// requested) over the PROT_NONE reservation. Returns 0 on success, a
// recoverable errno on failure; aborts the VM on unrecoverable errors.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

// Like pd_commit_memory(), but any failure exits the VM with mesg.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
  }
}

// Largest supported page size that divides alignment evenly; falls back
// to the base page size. _page_sizes is kept in descending order with a
// 0 sentinel (see mpss_sanity_check()).
size_t os::Solaris::page_size_for_alignment(size_t alignment) {
  assert(is_size_aligned(alignment, (size_t) vm_page_size()),
         SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
         alignment, (size_t) vm_page_size());

  for (int i = 0; _page_sizes[i] != 0; i++) {
    if (is_size_aligned(alignment, _page_sizes[i])) {
      return _page_sizes[i];
    }
  }

  return (size_t) vm_page_size();
}

// Commit with an alignment hint: after a successful plain commit, ask
// the kernel for large pages when the hint admits a size larger than
// the base page size. The large-page request is best effort.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0 && UseLargePages && alignment_hint > 0) {
    assert(is_size_aligned(bytes, alignment_hint),
           SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint);

    // The syscall memcntl requires an exact page size (see man memcntl for details).
    size_t page_size = page_size_for_alignment(alignment_hint);
    if (page_size > (size_t) vm_page_size()) {
      (void)Solaris::setup_large_pages(addr, bytes, page_size);
    }
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

// Hinted commit that exits the VM with mesg on any failure.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
  }
}

// Uncommit the pages in a specified region.
// MADV_FREE lets the kernel reclaim the backing pages; failure is only
// reported in debug builds.
void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

// Stack guard pages are ordinary committed (non-executable) memory here;
// protection is applied elsewhere.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Change the page size in a given range.
2449void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2450 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2451 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2452 if (UseLargePages) { 2453 size_t page_size = Solaris::page_size_for_alignment(alignment_hint); 2454 if (page_size > (size_t) vm_page_size()) { 2455 Solaris::setup_large_pages(addr, bytes, page_size); 2456 } 2457 } 2458} 2459 2460// Tell the OS to make the range local to the first-touching LWP 2461void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2462 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2463 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2464 debug_only(warning("MADV_ACCESS_LWP failed.")); 2465 } 2466} 2467 2468// Tell the OS that this range would be accessed from different LWPs. 2469void os::numa_make_global(char *addr, size_t bytes) { 2470 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2471 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2472 debug_only(warning("MADV_ACCESS_MANY failed.")); 2473 } 2474} 2475 2476// Get the number of the locality groups. 2477size_t os::numa_get_groups_num() { 2478 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2479 return n != -1 ? n : 1; 2480} 2481 2482// Get a list of leaf locality groups. A leaf lgroup is group that 2483// doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2484// board. An LWP is assigned to one of these groups upon creation. 
// Fill ids with the leaf lgroups (those that have memory), breadth-first
// from the root; returns the count. Falls back to a single group 0 on
// any lgrp API error or when no group reports memory (UMA).
// ids doubles as the BFS work queue: [0, bottom) holds accepted leaves,
// [cur, top) holds nodes still to expand.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
    ids[0] = 0;
    return 1;
  }
  // NOTE(review): result_size is never used, and the loop mixes a signed
  // k with the unsigned size — harmless but worth cleaning up.
  int result_size = 0, top = 1, bottom = 0, cur = 0;
  for (int k = 0; k < size; k++) {
    // Append the children of ids[cur] at ids[top]; r is the child count.
    int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
                                   (Solaris::lgrp_id_t*)&ids[top], size - top);
    if (r == -1) {
      ids[0] = 0;
      return 1;
    }
    if (!r) {
      // That's a leaf node.
      assert(bottom <= cur, "Sanity check");
      // Check if the node has memory
      if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
                                  NULL, 0, LGRP_RSRC_MEM) > 0) {
        ids[bottom++] = ids[cur];
      }
    }
    top += r;
    cur++;
  }
  if (bottom == 0) {
    // Handle a situation, when the OS reports no memory available.
    // Assume UMA architecture.
    ids[0] = 0;
    return 1;
  }
  return bottom;
}

// Detect the topology change. Typically happens during CPU plugging-unplugging.
// When the cached lgrp cookie is stale, re-initialize the API and report true.
bool os::numa_topology_changed() {
  int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  if (is_stale != -1 && is_stale) {
    Solaris::lgrp_fini(Solaris::lgrp_cookie());
    Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
    assert(c != 0, "Failure to initialize LGRP API");
    Solaris::set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Get the group id of the current LWP.
// The home lgroup may be memoryless, so pick a random one among the
// lgroups with memory reachable from it; 0 on any failure.
int os::numa_get_group_id() {
  int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  if (lgrp_id == -1) {
    return 0;
  }
  const int size = os::numa_get_groups_num();
  int *ids = (int*)alloca(size * sizeof(int));

  // Get the ids of all lgroups with memory; r is the count.
  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  if (r <= 0) {
    return 0;
  }
  return ids[os::random() % r];
}

// Request information about the page.
// Queries meminfo(2) for the lgroup and page size of the page containing
// start. validity bit 0 = address valid; bits 1/2 = the corresponding
// requested datum (VLGRP / VPAGESIZE) is valid.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  info->size = 0;
  info->lgrp_id = -1;

  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}

// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Walk [start, end) in page_size steps, batching up to MAX_MEMINFO_CNT
// addresses per meminfo(2) call. Returns the address of the first page
// whose size/lgroup differs from page_expected (filling page_found),
// end if all pages match, or NULL on a meminfo failure or an unmapped
// page. validity bits are as in get_page_info() above.
char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  // NOTE(review): the "+ 1" in the outdata sizing has no obvious
  // justification here — presumably defensive slack; confirm.
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
  uint_t validity[MAX_MEMINFO_CNT];

  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    // Build the batch of probe addresses for this meminfo call.
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in the batch that deviates from the expected
    // size/lgroup; a page with an invalid address aborts the scan.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else if (page_expected->size != 0) {
          break;
        }

        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i < addrs_count) {
      // Deviating page found: report what we actually saw there.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

// Uncommit by remapping the range PROT_NONE (keeping the reservation).
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap() of /dev/zero; returns NULL instead of
// MAP_FAILED so callers can test with a simple NULL check (errno is
// preserved for them).
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Reserve (but do not commit) an anonymous PROT_NONE mapping. With
// fixed == true the address is mandatory (MAP_FIXED); otherwise a
// non-trivial alignment_hint is passed to the kernel via MAP_ALIGN
// when available.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
                             size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)),
         "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    // With MAP_ALIGN the "address" argument carries the alignment.
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

// Reserve bytes of address space; a non-NULL requested_addr is mandatory
// (guarantee fails if the kernel returned something else).
char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                            size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
                                  (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

// Best-effort reservation at requested_addr: first try the kernel's
// address hint; if that fails, repeatedly reserve blocks, trimming off
// the parts that overlap the wanted region, until one lands exactly at
// requested_addr (or max_tries is exhausted). All non-matching blocks
// are unmapped before returning.
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions. The size of the gap
  // is dependent on the requested size and the MMU. Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

  // NOTE(review): the volatile qualifier here looks like a debugging aid
  // (keeps err visible in core dumps) — confirm before removing.
  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    pd_unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work. Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are size_t, so the
      // ">= 0" halves of these conditions are always true — negative
      // overlaps wrap to huge values and are rejected by "< bytes".
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// mprotect() wrapper returning true on success; addr must be page-aligned.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
/// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support
static size_t _large_page_size = 0;

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    // Shift val left past every smaller element.
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the OS for supported MPSS page sizes and distill os::_page_sizes
// down to at most VM_Version::page_size_count() usable sizes (descending,
// 0-terminated, always including the default page size). On success sets
// *page_size to the largest usable size and returns true; returns false
// when MPSS is unavailable or only one size exists.
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;  // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

// Validate MPSS support; clears UseLargePages when the check fails.
void os::large_page_init() {
  if (UseLargePages) {
    // print a warning if any large page related flag is specified on command line
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes);

    UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  }
}

// True iff bytes is one of the sizes collected in _page_sizes.
bool os::Solaris::is_valid_page_size(size_t bytes) {
  for (int i = 0; _page_sizes[i] != 0; i++) {
    if (_page_sizes[i] == bytes) {
      return true;
    }
  }
  return false;
}

// Advise the HAT layer (memcntl MC_HAT_ADVISE) to back [start, start+bytes)
// with pages of size align; best effort, returns false on failure.
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  assert(is_valid_page_size(align), SIZE_FORMAT " is not a valid page size", align);
  assert(is_ptr_aligned((void*) start, align),
         PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align);
  assert(is_size_aligned(bytes, align),
         SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align);

  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  // Upon successful completion, memcntl() returns 0
  if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

// Solaris uses MPSS advice rather than explicit special reservations.
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on
Solaris."); 2985 return false; 2986} 2987 2988size_t os::large_page_size() { 2989 return _large_page_size; 2990} 2991 2992// MPSS allows application to commit large page memory on demand; with ISM 2993// the entire memory region must be allocated as shared memory. 2994bool os::can_commit_large_page_memory() { 2995 return true; 2996} 2997 2998bool os::can_execute_large_page_memory() { 2999 return true; 3000} 3001 3002// Read calls from inside the vm need to perform state transitions 3003size_t os::read(int fd, void *buf, unsigned int nBytes) { 3004 size_t res; 3005 JavaThread* thread = (JavaThread*)Thread::current(); 3006 assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm"); 3007 ThreadBlockInVM tbiv(thread); 3008 RESTARTABLE(::read(fd, buf, (size_t) nBytes), res); 3009 return res; 3010} 3011 3012size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 3013 size_t res; 3014 JavaThread* thread = (JavaThread*)Thread::current(); 3015 assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm"); 3016 ThreadBlockInVM tbiv(thread); 3017 RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res); 3018 return res; 3019} 3020 3021size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) { 3022 size_t res; 3023 assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native, 3024 "Assumed _thread_in_native"); 3025 RESTARTABLE(::read(fd, buf, (size_t) nBytes), res); 3026 return res; 3027} 3028 3029void os::naked_short_sleep(jlong ms) { 3030 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3031 3032 // usleep is deprecated and removed from POSIX, in favour of nanosleep, but 3033 // Solaris requires -lrt for this. 3034 usleep((ms * 1000)); 3035 3036 return; 3037} 3038 3039// Sleep forever; naked call to OS-specific sleep; use with CAUTION 3040void os::infinite_sleep() { 3041 while (true) { // sleep forever ... 3042 ::sleep(100); // ... 
100 seconds at a time 3043 } 3044} 3045 3046// Used to convert frequent JVM_Yield() to nops 3047bool os::dont_yield() { 3048 if (DontYieldALot) { 3049 static hrtime_t last_time = 0; 3050 hrtime_t diff = getTimeNanos() - last_time; 3051 3052 if (diff < DontYieldALotInterval * 1000000) { 3053 return true; 3054 } 3055 3056 last_time += diff; 3057 3058 return false; 3059 } else { 3060 return false; 3061 } 3062} 3063 3064// Note that yield semantics are defined by the scheduling class to which 3065// the thread currently belongs. Typically, yield will _not yield to 3066// other equal or higher priority threads that reside on the dispatch queues 3067// of other CPUs. 3068 3069void os::naked_yield() { 3070 thr_yield(); 3071} 3072 3073// Interface for setting lwp priorities. If we are using T2 libthread, 3074// which forces the use of BoundThreads or we manually set UseBoundThreads, 3075// all of our threads will be assigned to real lwp's. Using the thr_setprio 3076// function is meaningless in this mode so we must adjust the real lwp's priority 3077// The routines below implement the getting and setting of lwp priorities. 3078// 3079// Note: T2 is now the only supported libthread. UseBoundThreads flag is 3080// being deprecated and all threads are now BoundThreads 3081// 3082// Note: There are three priority scales used on Solaris. Java priotities 3083// which range from 1 to 10, libthread "thr_setprio" scale which range 3084// from 0 to 127, and the current scheduling class of the process we 3085// are running in. This is typically from -60 to +60. 3086// The setting of the lwp priorities in done after a call to thr_setprio 3087// so Java priorities are mapped to libthread priorities and we map from 3088// the latter to lwp priorities. We don't keep priorities stored in 3089// Java priorities since some of our worker threads want to set priorities 3090// higher than all Java threads. 
3091// 3092// For related information: 3093// (1) man -s 2 priocntl 3094// (2) man -s 4 priocntl 3095// (3) man dispadmin 3096// = librt.so 3097// = libthread/common/rtsched.c - thrp_setlwpprio(). 3098// = ps -cL <pid> ... to validate priority. 3099// = sched_get_priority_min and _max 3100// pthread_create 3101// sched_setparam 3102// pthread_setschedparam 3103// 3104// Assumptions: 3105// + We assume that all threads in the process belong to the same 3106// scheduling class. IE. an homogenous process. 3107// + Must be root or in IA group to change change "interactive" attribute. 3108// Priocntl() will fail silently. The only indication of failure is when 3109// we read-back the value and notice that it hasn't changed. 3110// + Interactive threads enter the runq at the head, non-interactive at the tail. 3111// + For RT, change timeslice as well. Invariant: 3112// constant "priority integral" 3113// Konst == TimeSlice * (60-Priority) 3114// Given a priority, compute appropriate timeslice. 3115// + Higher numerical values have higher priority. 3116 3117// sched class attributes 3118typedef struct { 3119 int schedPolicy; // classID 3120 int maxPrio; 3121 int minPrio; 3122} SchedInfo; 3123 3124 3125static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits; 3126 3127#ifdef ASSERT 3128static int ReadBackValidate = 1; 3129#endif 3130static int myClass = 0; 3131static int myMin = 0; 3132static int myMax = 0; 3133static int myCur = 0; 3134static bool priocntl_enable = false; 3135 3136static const int criticalPrio = FXCriticalPriority; 3137static int java_MaxPriority_to_os_priority = 0; // Saved mapping 3138 3139 3140// lwp_priocntl_init 3141// 3142// Try to determine the priority scale for our process. 3143// 3144// Return errno or 0 if OK. 
3145// 3146static int lwp_priocntl_init() { 3147 int rslt; 3148 pcinfo_t ClassInfo; 3149 pcparms_t ParmInfo; 3150 int i; 3151 3152 if (!UseThreadPriorities) return 0; 3153 3154 // If ThreadPriorityPolicy is 1, switch tables 3155 if (ThreadPriorityPolicy == 1) { 3156 for (i = 0; i < CriticalPriority+1; i++) 3157 os::java_to_os_priority[i] = prio_policy1[i]; 3158 } 3159 if (UseCriticalJavaThreadPriority) { 3160 // MaxPriority always maps to the FX scheduling class and criticalPrio. 3161 // See set_native_priority() and set_lwp_class_and_priority(). 3162 // Save original MaxPriority mapping in case attempt to 3163 // use critical priority fails. 3164 java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority]; 3165 // Set negative to distinguish from other priorities 3166 os::java_to_os_priority[MaxPriority] = -criticalPrio; 3167 } 3168 3169 // Get IDs for a set of well-known scheduling classes. 3170 // TODO-FIXME: GETCLINFO returns the current # of classes in the 3171 // the system. We should have a loop that iterates over the 3172 // classID values, which are known to be "small" integers. 
3173 3174 strcpy(ClassInfo.pc_clname, "TS"); 3175 ClassInfo.pc_cid = -1; 3176 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3177 if (rslt < 0) return errno; 3178 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); 3179 tsLimits.schedPolicy = ClassInfo.pc_cid; 3180 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri; 3181 tsLimits.minPrio = -tsLimits.maxPrio; 3182 3183 strcpy(ClassInfo.pc_clname, "IA"); 3184 ClassInfo.pc_cid = -1; 3185 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3186 if (rslt < 0) return errno; 3187 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); 3188 iaLimits.schedPolicy = ClassInfo.pc_cid; 3189 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri; 3190 iaLimits.minPrio = -iaLimits.maxPrio; 3191 3192 strcpy(ClassInfo.pc_clname, "RT"); 3193 ClassInfo.pc_cid = -1; 3194 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3195 if (rslt < 0) return errno; 3196 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); 3197 rtLimits.schedPolicy = ClassInfo.pc_cid; 3198 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; 3199 rtLimits.minPrio = 0; 3200 3201 strcpy(ClassInfo.pc_clname, "FX"); 3202 ClassInfo.pc_cid = -1; 3203 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3204 if (rslt < 0) return errno; 3205 assert(ClassInfo.pc_cid != -1, "cid for FX class is -1"); 3206 fxLimits.schedPolicy = ClassInfo.pc_cid; 3207 fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri; 3208 fxLimits.minPrio = 0; 3209 3210 // Query our "current" scheduling class. 3211 // This will normally be IA, TS or, rarely, FX or RT. 3212 memset(&ParmInfo, 0, sizeof(ParmInfo)); 3213 ParmInfo.pc_cid = PC_CLNULL; 3214 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3215 if (rslt < 0) return errno; 3216 myClass = ParmInfo.pc_cid; 3217 3218 // We now know our scheduling classId, get specific information 3219 // about the class. 
3220 ClassInfo.pc_cid = myClass; 3221 ClassInfo.pc_clname[0] = 0; 3222 rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); 3223 if (rslt < 0) return errno; 3224 3225 if (ThreadPriorityVerbose) { 3226 tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); 3227 } 3228 3229 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3230 ParmInfo.pc_cid = PC_CLNULL; 3231 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3232 if (rslt < 0) return errno; 3233 3234 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3235 myMin = rtLimits.minPrio; 3236 myMax = rtLimits.maxPrio; 3237 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3238 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3239 myMin = iaLimits.minPrio; 3240 myMax = iaLimits.maxPrio; 3241 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict 3242 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3243 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3244 myMin = tsLimits.minPrio; 3245 myMax = tsLimits.maxPrio; 3246 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict 3247 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 3248 fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 3249 myMin = fxLimits.minPrio; 3250 myMax = fxLimits.maxPrio; 3251 myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict 3252 } else { 3253 // No clue - punt 3254 if (ThreadPriorityVerbose) { 3255 tty->print_cr("Unknown scheduling class: %s ... 
\n", 3256 ClassInfo.pc_clname); 3257 } 3258 return EINVAL; // no clue, punt 3259 } 3260 3261 if (ThreadPriorityVerbose) { 3262 tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax); 3263 } 3264 3265 priocntl_enable = true; // Enable changing priorities 3266 return 0; 3267} 3268 3269#define IAPRI(x) ((iaparms_t *)((x).pc_clparms)) 3270#define RTPRI(x) ((rtparms_t *)((x).pc_clparms)) 3271#define TSPRI(x) ((tsparms_t *)((x).pc_clparms)) 3272#define FXPRI(x) ((fxparms_t *)((x).pc_clparms)) 3273 3274 3275// scale_to_lwp_priority 3276// 3277// Convert from the libthread "thr_setprio" scale to our current 3278// lwp scheduling class scale. 3279// 3280static int scale_to_lwp_priority(int rMin, int rMax, int x) { 3281 int v; 3282 3283 if (x == 127) return rMax; // avoid round-down 3284 v = (((x*(rMax-rMin)))/128)+rMin; 3285 return v; 3286} 3287 3288 3289// set_lwp_class_and_priority 3290int set_lwp_class_and_priority(int ThreadID, int lwpid, 3291 int newPrio, int new_class, bool scale) { 3292 int rslt; 3293 int Actual, Expected, prv; 3294 pcparms_t ParmInfo; // for GET-SET 3295#ifdef ASSERT 3296 pcparms_t ReadBack; // for readback 3297#endif 3298 3299 // Set priority via PC_GETPARMS, update, PC_SETPARMS 3300 // Query current values. 3301 // TODO: accelerate this by eliminating the PC_GETPARMS call. 3302 // Cache "pcparms_t" in global ParmCache. 3303 // TODO: elide set-to-same-value 3304 3305 // If something went wrong on init, don't change priorities. 3306 if (!priocntl_enable) { 3307 if (ThreadPriorityVerbose) { 3308 tty->print_cr("Trying to set priority but init failed, ignoring"); 3309 } 3310 return EINVAL; 3311 } 3312 3313 // If lwp hasn't started yet, just return 3314 // the _start routine will call us again. 
3315 if (lwpid <= 0) { 3316 if (ThreadPriorityVerbose) { 3317 tty->print_cr("deferring the set_lwp_class_and_priority of thread " 3318 INTPTR_FORMAT " to %d, lwpid not set", 3319 ThreadID, newPrio); 3320 } 3321 return 0; 3322 } 3323 3324 if (ThreadPriorityVerbose) { 3325 tty->print_cr ("set_lwp_class_and_priority(" 3326 INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ", 3327 ThreadID, lwpid, newPrio); 3328 } 3329 3330 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3331 ParmInfo.pc_cid = PC_CLNULL; 3332 rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); 3333 if (rslt < 0) return errno; 3334 3335 int cur_class = ParmInfo.pc_cid; 3336 ParmInfo.pc_cid = (id_t)new_class; 3337 3338 if (new_class == rtLimits.schedPolicy) { 3339 rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms; 3340 rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio, 3341 rtLimits.maxPrio, newPrio) 3342 : newPrio; 3343 rtInfo->rt_tqsecs = RT_NOCHANGE; 3344 rtInfo->rt_tqnsecs = RT_NOCHANGE; 3345 if (ThreadPriorityVerbose) { 3346 tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri); 3347 } 3348 } else if (new_class == iaLimits.schedPolicy) { 3349 iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3350 int maxClamped = MIN2(iaLimits.maxPrio, 3351 cur_class == new_class 3352 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio); 3353 iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio, 3354 maxClamped, newPrio) 3355 : newPrio; 3356 iaInfo->ia_uprilim = cur_class == new_class 3357 ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio; 3358 iaInfo->ia_mode = IA_NOCHANGE; 3359 if (ThreadPriorityVerbose) { 3360 tty->print_cr("IA: [%d...%d] %d->%d\n", 3361 iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); 3362 } 3363 } else if (new_class == tsLimits.schedPolicy) { 3364 tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3365 int maxClamped = MIN2(tsLimits.maxPrio, 3366 cur_class == new_class 3367 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio); 3368 tsInfo->ts_upri = scale ? 
scale_to_lwp_priority(tsLimits.minPrio, 3369 maxClamped, newPrio) 3370 : newPrio; 3371 tsInfo->ts_uprilim = cur_class == new_class 3372 ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio; 3373 if (ThreadPriorityVerbose) { 3374 tty->print_cr("TS: [%d...%d] %d->%d\n", 3375 tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri); 3376 } 3377 } else if (new_class == fxLimits.schedPolicy) { 3378 fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 3379 int maxClamped = MIN2(fxLimits.maxPrio, 3380 cur_class == new_class 3381 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio); 3382 fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio, 3383 maxClamped, newPrio) 3384 : newPrio; 3385 fxInfo->fx_uprilim = cur_class == new_class 3386 ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio; 3387 fxInfo->fx_tqsecs = FX_NOCHANGE; 3388 fxInfo->fx_tqnsecs = FX_NOCHANGE; 3389 if (ThreadPriorityVerbose) { 3390 tty->print_cr("FX: [%d...%d] %d->%d\n", 3391 fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri); 3392 } 3393 } else { 3394 if (ThreadPriorityVerbose) { 3395 tty->print_cr("Unknown new scheduling class %d\n", new_class); 3396 } 3397 return EINVAL; // no clue, punt 3398 } 3399 3400 rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); 3401 if (ThreadPriorityVerbose && rslt) { 3402 tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno); 3403 } 3404 if (rslt < 0) return errno; 3405 3406#ifdef ASSERT 3407 // Sanity check: read back what we just attempted to set. 3408 // In theory it could have changed in the interim ... 3409 // 3410 // The priocntl system call is tricky. 3411 // Sometimes it'll validate the priority value argument and 3412 // return EINVAL if unhappy. At other times it fails silently. 3413 // Readbacks are prudent. 
3414 3415 if (!ReadBackValidate) return 0; 3416 3417 memset(&ReadBack, 0, sizeof(pcparms_t)); 3418 ReadBack.pc_cid = PC_CLNULL; 3419 rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack); 3420 assert(rslt >= 0, "priocntl failed"); 3421 Actual = Expected = 0xBAD; 3422 assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match"); 3423 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3424 Actual = RTPRI(ReadBack)->rt_pri; 3425 Expected = RTPRI(ParmInfo)->rt_pri; 3426 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3427 Actual = IAPRI(ReadBack)->ia_upri; 3428 Expected = IAPRI(ParmInfo)->ia_upri; 3429 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3430 Actual = TSPRI(ReadBack)->ts_upri; 3431 Expected = TSPRI(ParmInfo)->ts_upri; 3432 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 3433 Actual = FXPRI(ReadBack)->fx_upri; 3434 Expected = FXPRI(ParmInfo)->fx_upri; 3435 } else { 3436 if (ThreadPriorityVerbose) { 3437 tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n", 3438 ParmInfo.pc_cid); 3439 } 3440 } 3441 3442 if (Actual != Expected) { 3443 if (ThreadPriorityVerbose) { 3444 tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n", 3445 lwpid, newPrio, ReadBack.pc_cid, Actual, Expected); 3446 } 3447 } 3448#endif 3449 3450 return 0; 3451} 3452 3453// Solaris only gives access to 128 real priorities at a time, 3454// so we expand Java's ten to fill this range. This would be better 3455// if we dynamically adjusted relative priorities. 3456// 3457// The ThreadPriorityPolicy option allows us to select 2 different 3458// priority scales. 3459// 3460// ThreadPriorityPolicy=0 3461// Since the Solaris' default priority is MaximumPriority, we do not 3462// set a priority lower than Max unless a priority lower than 3463// NormPriority is requested. 3464// 3465// ThreadPriorityPolicy=1 3466// This mode causes the priority table to get filled with 3467// linear values. 
// NormPriority gets mapped to 50% of the
// Maximum priority and so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.

// Mapping from Java priority (index) to Solaris thr_setprio priority.
// Index 11 is the optional CriticalPriority; its negative value marks it
// as "use the FX critical priority" for set_native_priority() below.
int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127,            // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority
};

// Apply the (already mapped) OS priority to the thread: via thr_setprio for
// ordinary priorities, and via set_lwp_class_and_priority for the lwp.  A
// negative newpri equal to -criticalPrio requests the FX critical priority;
// if that fails we retry with the saved MaxPriority mapping in our own class.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  int lwp_status =
          set_lwp_class_and_priority(osthread->thread_id(),
                                     osthread->lwp_id(),
                                     newpri,
                                     fxcritical ? fxLimits.schedPolicy : myClass,
                                     !fxcritical);
  if (lwp_status != 0 && fxcritical) {
    // Try again, this time without changing the scheduling class
    newpri = java_MaxPriority_to_os_priority;
    lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                            osthread->lwp_id(),
                                            newpri, myClass, false);
  }
  // A non-zero status from either the thr_setprio or the lwp call reports OS_ERR.
  status |= lwp_status;
  return (status == 0) ? OS_OK : OS_ERR;
}


// Report the thread's thr_setprio-scale priority (or NormalPriority when
// thread priorities are disabled).
OSReturn os::get_native_priority(const Thread* const thread,
                                 int *priority_ptr) {
  int p;
  if (!UseThreadPriorities) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Forget the context saved while the thread was suspended.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}

// Remember the suspended thread's ucontext so tasks (e.g. PC sampling below)
// can inspect it while the thread is stopped.
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}

// Serializes the suspend/resume handshake between the requesting thread
// (do_suspend/do_resume) and the target thread's SR_handler.
static PosixSemaphore sr_semaphore;

// Signal handler side of the suspend/resume protocol: runs in the target
// thread when it receives the SR signal.  On a suspend request it publishes
// the ucontext, signals sr_semaphore, and sigsuspend()s until the state
// machine moves back to running; other states mean the request was
// cancelled or is a wakeup, and are ignored here.
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}

void os::print_statistics() {
}

// Print the title/message between separator rules on the error stream, then
// block reading stdin until a byte is available; answers 'y'/'Y' return true.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// Deliver the suspend/resume signal to the target thread.
static int sr_notify(OSThread* osthread) {
  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert_status(status == 0, status, "thr_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// Requester side of suspend: move the state machine to SUSPEND_REQUEST,
// signal the target, and wait (with timeout + cancellation) until the
// target's SR_handler confirms via sr_semaphore.  Returns true iff the
// target is now suspended.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Requester side of resume: move the state machine to WAKEUP_REQUEST and
// keep signalling the target until it reports running again.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  // NOTE(review): unreachable - the loop above only exits via return.
  guarantee(osthread->sr.is_running(), "Must be running!");
}

// Suspend the target, run do_task() against its saved context, then resume it.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

// SuspendedThreadTask that captures the target thread's program counter
// from the ucontext saved while the thread was suspended.
class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
 protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint.  Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value,
                              const methodHandle& method, JavaCallArguments* args,
                              Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.
If the flag "abort_if_unrecognized" is true, then this 3773// routine will never retun false (zero), but instead will execute a VM panic 3774// routine kill the process. 3775// 3776// If this routine returns false, it is OK to call it again. This allows 3777// the user-defined signal handler to perform checks either before or after 3778// the VM performs its own checks. Naturally, the user code would be making 3779// a serious error if it tried to handle an exception (such as a null check 3780// or breakpoint) that the VM was generating for its own correct operation. 3781// 3782// This routine may recognize any of the following kinds of signals: 3783// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ, 3784// os::Solaris::SIGasync 3785// It should be consulted by handlers for any of those signals. 3786// 3787// The caller of this routine must pass in the three arguments supplied 3788// to the function referred to in the "sa_sigaction" (not the "sa_handler") 3789// field of the structure passed to sigaction(). This routine assumes that 3790// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART. 3791// 3792// Note that the VM will print warnings if it detects conflicting signal 3793// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers". 3794// 3795extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo, 3796 siginfo_t* siginfo, 3797 void* ucontext, 3798 int abort_if_unrecognized); 3799 3800 3801void signalHandler(int sig, siginfo_t* info, void* ucVoid) { 3802 int orig_errno = errno; // Preserve errno value over signal handler. 3803 JVM_handle_solaris_signal(sig, info, ucVoid, true); 3804 errno = orig_errno; 3805} 3806 3807// This boolean allows users to forward their own non-matching signals 3808// to JVM_handle_solaris_signal, harmlessly. 
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Returns the handler a chained signal should fall back to: first the one
// libjsig recorded (if loaded and the signal is in its supported range),
// otherwise the handler the VM saved before overwriting it.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invokes the application's original handler described by actp, emulating
// the kernel's dispatch semantics (SA_NODEFER, SA_SIGINFO, SA_RESETHAND and
// the saved signal mask). Returns true if the signal was consumed, false if
// the old disposition was SIG_DFL (let the VM treat it as unexpected).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Forwards an unrecognized signal to the application's pre-existing handler,
// if signal chaining is enabled and such a handler exists. Returns true iff
// the chained handler consumed the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Returns the sigaction the VM saved for sig before installing its own
// handler, or NULL if none was saved.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) &&
         (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Records the application's original sigaction for sig so that
// chained_handler() can later forward to it.
void os::Solaris::save_preinstalled_handler(int sig,
                                            struct sigaction& oldAct) {
  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) &&
         (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Installs the VM's signalHandler for sig. If a foreign handler is already
// present, either leaves it alone (AllowUserSignalHandlers / !set_installed),
// saves it for chaining (UseSignalChaining && oktochain), exits during
// initialization (chaining requested but not allowed for this signal), or
// aborts with fatal().
void os::Solaris::set_signal_handler(int sig, bool set_installed,
                                     bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand =
      oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                          : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Record the expected flags so run_periodic_checks can detect tampering.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Check a signal's handler once; after the first mismatch warning the
// signal is added to check_signal_done and never checked again.
#define DO_SIGNAL_CHECK(sig)                      \
  do {                                            \
    if (!sigismember(&check_signal_done, sig)) {  \
      os::Solaris::check_signal_handler(sig);     \
    }                                             \
  } while (0)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if (!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Cached pointer to the real (non-interposed) sigaction(), looked up lazily.
static os_sigaction_t os_sigaction = NULL;

// Compares the currently installed handler for sig against the one the VM
// expects, warning (once) on any mismatch of handler or sa_flags.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);


  switch (sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGXFSZ:
  case SIGILL:
    jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    int asynsig = os::Solaris::SIGasync();

    if (sig == asynsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    } else {
      return;
    }
    break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Installs the VM's handlers for all VM-managed signals, bracketing the
// installation with libjsig's JVM_begin/end_signal_setting callbacks when
// libjsig is preloaded (so libjsig can distinguish VM handlers from
// application handlers for chaining).
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    // The presence of JVM_begin_signal_setting implies libjsig is loaded.
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGasync() > OLDMAXSIGNUM) {
    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title,
                  const char* format, ...);

// Textual names for Solaris signals; indexed directly by signal number
// (entry 0 is a placeholder).
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Formats a human-readable name for a signal number into buf and returns
// buf; returns NULL if exception_code is not a valid signal number.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      // Beyond the named table (e.g. realtime signals): synthesize "SIGnn".
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Looks up a symbol in the process image; returns NULL if not found.
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if (addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy(), but the symbol is required: aborts the VM
// with the dlerror() message if the lookup fails.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if (addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}

// Verifies a sufficiently recent libthread is present and records the
// address range of its signal-handling trampoline via thr_sighndlrinfo().
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if (func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


// Function pointers for the mutex/condvar implementation selected by
// synchronization_init() (LWP, pthread, or default libthread primitives).
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Binds the mutex/condvar function pointers above to one of three backends:
// raw LWP primitives (UseLWPSynchronization), pthreads (UsePthreads), or the
// default Solaris threads library.
void os::Solaris::synchronization_init() {
  if (UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  } else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if (UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    } else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Dynamically loads liblgrp (locality-group / NUMA API) and resolves the
// entry points used by the VM. Returns false if the library is unavailable.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                                      dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolves optional OS entry points (getisax, meminfo) that may be absent
// on older Solaris releases; leaves the pointers NULL if not found.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Forwards to the getisax(2) entry point resolved in misc_sym_init();
// callers must only invoke this when supports_getisax() is true.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

// Resolves the optional pset_getloadavg(3C) entry point once at startup.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1) {
    fatal("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
  }
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal("os::init: cannot open /dev/zero (%s)", strerror(errno));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl) {
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
  }

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose && PrintMiscellaneous) {
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
               (intptr_t)polling_page);
  }
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous) {
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n",
                 (intptr_t)mem_serialize_page);
    }
#endif
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    // NOTE(review): "%dk" with a size_t argument -- consider SIZE_FORMAT;
    // verify on 64-bit builds.
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kb pages there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less. Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary). The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode)) {
        perror("os::init_2 getrlimit failed");
      }
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          perror("os::init_2 setrlimit failed");
        }
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
    fatal("Could not enable polling page");
  }
}

// OS interface.

// No C-heap consistency check is implemented on Solaris.
bool os::check_heap(bool force) { return true; }

// Is a (classpath) directory empty?
// Returns true iff the directory at 'path' contains no entries other
// than "." and "..".  Note that a directory that cannot be opened is
// also reported as empty.
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  // Scan the directory
  bool result = true;
  // Buffer handed to the reentrant readdir variant; sized so the largest
  // possible entry name fits behind the dirent header.
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

// Open 'path' with the given flags and mode.  Returns a file descriptor,
// or -1 with errno set on failure.  Rejects overlong paths (ENAMETOOLONG)
// and directories (EISDIR), works around the 32-bit Solaris 256-fd stdio
// limit, and marks the descriptor close-on-exec.
int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Opening a directory read-only "succeeds" on Solaris, but callers
        // of os::open expect plain files; fail with EISDIR instead.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

  // 32-bit Solaris systems suffer from:
  //
  // - an historical default soft limit of 256 per-process file
  //   descriptors that is too low for many Java programs.
  //
  // - a design flaw where file descriptors created using stdio
  //   fopen must be less than 256, _even_ when the first limit above
  //   has been raised. This can cause calls to fopen (but not calls to
  //   open, for example) to fail mysteriously, perhaps in 3rd party
  //   native code (although the JDK itself uses fopen). One can hardly
  //   criticize them for using this most standard of all functions.
  //
  // We attempt to make everything work anyways by:
  //
  // - raising the soft limit on per-process file descriptors beyond
  //   256
  //
  // - As of Solaris 10u4, we can request that Solaris raise the 256
  //   stdio fopen limit by calling function enable_extended_FILE_stdio.
  //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
  //
  // - If we are stuck on an old (pre 10u4) Solaris system, we can
  //   workaround the bug by remapping non-stdio file descriptors below
  //   256 to ones beyond 256, which is done below.
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
  // 6431278: Netbeans crash on 32 bit Solaris: need to call
  //          enable_extended_FILE_stdio() in VM initialisation
  // Giri Mandalika's blog
  // http://technopark02.blogspot.com/2005_05_01_archive.html
  //
#ifndef _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    // Duplicate the descriptor above 255, leaving the sub-256 range free
    // for stdio on old 32-bit Solaris.
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  //
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
  }
#endif

  return fd;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    // Fail with EEXIST rather than clobbering an existing file.
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// 64-bit-offset seek regardless of the VM's data model.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

// Solaris paths are already in native form; nothing to convert.
char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

// fsync, retried on EINTR.
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Store in *bytes an estimate of how much can be read from 'fd' without
// blocking: FIONREAD for character devices/pipes/sockets, otherwise the
// distance from the current position to end-of-file.  Returns 1 on
// success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
         "Assumed _thread_in_native");
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n,ioctl_return;

      RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Fall back to seeking: available = file size - current offset.
  // The third lseek64 restores the original position.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
// Read-only mappings are MAP_SHARED; writable ones are MAP_PRIVATE
// (copy-on-write), so stores are not written back to the file.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    // Caller requested a specific address; MAP_FIXED replaces any
    // mapping already present there.
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
4843bool os::pd_unmap_memory(char* addr, size_t bytes) { 4844 return munmap(addr, bytes) == 0; 4845} 4846 4847void os::pause() { 4848 char filename[MAX_PATH]; 4849 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4850 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4851 } else { 4852 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4853 } 4854 4855 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4856 if (fd != -1) { 4857 struct stat buf; 4858 ::close(fd); 4859 while (::stat(filename, &buf) == 0) { 4860 (void)::poll(NULL, 0, 100); 4861 } 4862 } else { 4863 jio_fprintf(stderr, 4864 "Could not open pause file '%s', continuing immediately.\n", filename); 4865 } 4866} 4867 4868#ifndef PRODUCT 4869#ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 4870// Turn this on if you need to trace synch operations. 4871// Set RECORD_SYNCH_LIMIT to a large-enough value, 4872// and call record_synch_enable and record_synch_disable 4873// around the computation of interest. 

// Forward declaration; the recording machinery is defined below.
void record_synch(char* name, bool returning);  // defined below

// RAII helper: records entry to a synch operation in its constructor and
// the matching return in its destructor.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Generates an extern "C" interposer for libc/libthread function 'name':
// it resolves the real implementation via dlsym(RTLD_NEXT), counts calls,
// records entry/exit through RecordSynch, runs the 'inner' sanity check,
// then forwards to the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Sanity checks: synchronization objects handed to these wrappers must
// live in the C heap, never inside the (movable, collectable) Java heap.
#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX(  mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX(  mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p), CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p), CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// A fixed-size, one-shot in-memory trace buffer of synch operations,
// intended for inspection from dbx.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one entry (operation name, direction, calling thread) to the
// trace buffer; recording silently stops once the buffer is full.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of pr_utime within prusage_t, and the length of the span
// from pr_utime up to (not including) pr_ttime; used below to pread()
// just the time fields out of /proc.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    return os::current_thread_cpu_time();
  }
}

// Reads the target thread's CPU usage from its
// /proc/<pid>/lwp/<lwpid>/lwpusage file.  Returns the time in
// nanoseconds, or -1 if the usage file cannot be opened or read.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if (fd == -1) return -1;

  // Read only the time fields of the prusage struct; retry on EINTR.
  do {
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if (count < 0) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
               (jlong)prusage.pr_stime.tv_nsec +
               (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
               (jlong)prusage.pr_utime.tv_nsec;
  }

  return (lwp_time);
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Best-effort symbolic description of 'addr' for error reporting and the
// debugger "find" command: prints symbol+offset (via dladdr) and the
// containing library; with -XX:+Verbose also disassembles a window of
// code around the address.  Returns false if dladdr cannot place 'addr'.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL) {
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      Dl_info dlinfo2;
      // Do not decode past the start of the next symbol.
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
        end = (address) dlinfo2.dli_saddr;
      }
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Returns the number of stack bytes still available to the current
  // thread, less STACK_SLACK bytes of headroom.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// In a sense, park()-unpark() just provides more polite spinning
// and polling with the key difference over naive spinning being
// that a parked thread needs to be explicitly unparked() in order
// to wake up and to poll the underlying condition.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread.  Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.   Tx parks
// 2.   Ty unparks Tx
// 3.   Tx resumes from park


// value determined through experimentation
// (minimum millis timeout passed to _lwp_cond_timedwait to compensate
// for the old Solaris round-down bug -- see compute_abstime below)
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute
// timestruc_t expected by cond_timedwait(), clamping to the platform's
// maximum acceptable wait and compensating for the old Solaris
// round-down bug (see ROUNDINGFIX).
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // carry microsecond overflow into the seconds field
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Block the associated thread until unpark() is called (untimed down()).
void os::PlatformEvent::park() {           // AKA: down()
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;
  // CAS-decrement _Event: consumes a pending permit (1 -> 0) or
  // registers this thread as a waiter (0 -> -1).
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    --_nParked;
    _Event = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}

// Timed variant: block for at most 'millis' ms.  Returns OS_OK if
// unparked (or if a permit was already pending), OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_nParked == 0, "invariant");
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime(&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee(_nParked == 0, "invariant");
  ++_nParked;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;    // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) ret = OS_OK;
  _Event = 0;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

// Post a permit and, if a thread is parked on this event, wake it.
void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // Note that we signal() *after* dropping the lock for "immortal" Events.
    // This is safe and avoids a common class of futile wakeups.  In rare
    // circumstances this can cause a thread to return prematurely from
    // cond_{timed}wait() but the spurious wakeup is benign and the victim
    // will simply re-test the condition and re-park itself.
    // This provides particular benefit if the underlying platform does not
    // provide wait morphing.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166
// -------------------------------------------------------

// The solaris and linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus _counter.
// Park decrements _counter if > 0, else does a condvar wait. Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.

#define MAX_SECS 100000000

// This code is common to linux and solaris and will be moved to a
// common place in dolphin.
//
// The passed in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds.
// Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Given time is a 64-bit value and the time_t used in the timespec is only
// a signed-32-bit value (except on 64-bit Linux) we have to watch for
// overflow if times way in the future are given. Further on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100,000,000.
// As it will be 28 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
//
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert(time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is an absolute deadline in milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    } else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  } else {
    // 'time' is a relative timeout in nanoseconds.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    } else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// java.util.concurrent.locks.LockSupport park: block the current thread
// until unpark() posts a permit, the timeout (if any) expires, the thread
// is interrupted, or a spurious wakeup occurs.  'time' is a relative
// nanosecond timeout, or an absolute millisecond deadline if isAbsolute.
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) {  // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also check interrupt before trying wait.
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;

  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert(status == 0, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait(_cond, _mutex);
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

// java.util.concurrent.locks.LockSupport unpark: post the (single) permit
// and signal the condvar if the owning thread was waiting without one.
void Parker::unpark() {
  int status = os::Solaris::mutex_lock(_mutex);
  assert(status == 0, "invariant");
  const int s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock(_mutex);
  assert(status == 0, "invariant");

  if (s < 1) {
    // No permit was pending, so the owner may be blocked in cond_wait.
    status = os::Solaris::cond_signal(_cond);
    assert(status == 0, "invariant");
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
5604int os::fork_and_exec(char* cmd) { 5605 char * argv[4]; 5606 argv[0] = (char *)"sh"; 5607 argv[1] = (char *)"-c"; 5608 argv[2] = cmd; 5609 argv[3] = NULL; 5610 5611 // fork is async-safe, fork1 is not so can't use in signal handler 5612 pid_t pid; 5613 Thread* t = ThreadLocalStorage::get_thread_slow(); 5614 if (t != NULL && t->is_inside_signal_handler()) { 5615 pid = fork(); 5616 } else { 5617 pid = fork1(); 5618 } 5619 5620 if (pid < 0) { 5621 // fork failed 5622 warning("fork failed: %s", strerror(errno)); 5623 return -1; 5624 5625 } else if (pid == 0) { 5626 // child process 5627 5628 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris 5629 execve("/usr/bin/sh", argv, environ); 5630 5631 // execve failed 5632 _exit(-1); 5633 5634 } else { 5635 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 5636 // care about the actual exit code, for now. 5637 5638 int status; 5639 5640 // Wait for the child process to exit. This returns immediately if 5641 // the child has already exited. */ 5642 while (waitpid(pid, &status, 0) < 0) { 5643 switch (errno) { 5644 case ECHILD: return 0; 5645 case EINTR: break; 5646 default: return -1; 5647 } 5648 } 5649 5650 if (WIFEXITED(status)) { 5651 // The child exited normally; get its exit code. 5652 return WEXITSTATUS(status); 5653 } else if (WIFSIGNALED(status)) { 5654 // The child exited because of a signal 5655 // The best value to return is 0x80 + signal number, 5656 // because that is what all Unix shells do, and because 5657 // it allows callers to distinguish between process exit and 5658 // process death by signal. 
5659 return 0x80 + WTERMSIG(status); 5660 } else { 5661 // Unknown exit code; pass it through 5662 return status; 5663 } 5664 } 5665} 5666 5667// is_headless_jre() 5668// 5669// Test for the existence of xawt/libmawt.so or libawt_xawt.so 5670// in order to report if we are running in a headless jre 5671// 5672// Since JDK8 xawt/libmawt.so was moved into the same directory 5673// as libawt.so, and renamed libawt_xawt.so 5674// 5675bool os::is_headless_jre() { 5676 struct stat statbuf; 5677 char buf[MAXPATHLEN]; 5678 char libmawtpath[MAXPATHLEN]; 5679 const char *xawtstr = "/xawt/libmawt.so"; 5680 const char *new_xawtstr = "/libawt_xawt.so"; 5681 char *p; 5682 5683 // Get path to libjvm.so 5684 os::jvm_path(buf, sizeof(buf)); 5685 5686 // Get rid of libjvm.so 5687 p = strrchr(buf, '/'); 5688 if (p == NULL) { 5689 return false; 5690 } else { 5691 *p = '\0'; 5692 } 5693 5694 // Get rid of client or server 5695 p = strrchr(buf, '/'); 5696 if (p == NULL) { 5697 return false; 5698 } else { 5699 *p = '\0'; 5700 } 5701 5702 // check xawt/libmawt.so 5703 strcpy(libmawtpath, buf); 5704 strcat(libmawtpath, xawtstr); 5705 if (::stat(libmawtpath, &statbuf) == 0) return false; 5706 5707 // check libawt_xawt.so 5708 strcpy(libmawtpath, buf); 5709 strcat(libmawtpath, new_xawtstr); 5710 if (::stat(libmawtpath, &statbuf) == 0) return false; 5711 5712 return true; 5713} 5714 5715size_t os::write(int fd, const void *buf, unsigned int nBytes) { 5716 size_t res; 5717 assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native, 5718 "Assumed _thread_in_native"); 5719 RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res); 5720 return res; 5721} 5722 5723int os::close(int fd) { 5724 return ::close(fd); 5725} 5726 5727int os::socket_close(int fd) { 5728 return ::close(fd); 5729} 5730 5731int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5732 assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native, 5733 "Assumed _thread_in_native"); 5734 
RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags)); 5735} 5736 5737int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5738 assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native, 5739 "Assumed _thread_in_native"); 5740 RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags)); 5741} 5742 5743int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5744 RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags)); 5745} 5746 5747// As both poll and select can be interrupted by signals, we have to be 5748// prepared to restart the system call after updating the timeout, unless 5749// a poll() is done with timeout == -1, in which case we repeat with this 5750// "wait forever" value. 5751 5752int os::connect(int fd, struct sockaddr *him, socklen_t len) { 5753 int _result; 5754 _result = ::connect(fd, him, len); 5755 5756 // On Solaris, when a connect() call is interrupted, the connection 5757 // can be established asynchronously (see 6343810). Subsequent calls 5758 // to connect() must check the errno value which has the semantic 5759 // described below (copied from the connect() man page). Handling 5760 // of asynchronously established connections is required for both 5761 // blocking and non-blocking sockets. 5762 // EINTR The connection attempt was interrupted 5763 // before any data arrived by the delivery of 5764 // a signal. The connection, however, will be 5765 // established asynchronously. 5766 // 5767 // EINPROGRESS The socket is non-blocking, and the connec- 5768 // tion cannot be completed immediately. 5769 // 5770 // EALREADY The socket is non-blocking, and a previous 5771 // connection attempt has not yet been com- 5772 // pleted. 5773 // 5774 // EISCONN The socket is already connected. 
5775 if (_result == OS_ERR && errno == EINTR) { 5776 // restarting a connect() changes its errno semantics 5777 RESTARTABLE(::connect(fd, him, len), _result); 5778 // undo these changes 5779 if (_result == OS_ERR) { 5780 if (errno == EALREADY) { 5781 errno = EINPROGRESS; // fall through 5782 } else if (errno == EISCONN) { 5783 errno = 0; 5784 return OS_OK; 5785 } 5786 } 5787 } 5788 return _result; 5789} 5790 5791// Get the default path to the core file 5792// Returns the length of the string 5793int os::get_core_path(char* buffer, size_t bufferSize) { 5794 const char* p = get_current_directory(buffer, bufferSize); 5795 5796 if (p == NULL) { 5797 assert(p != NULL, "failed to get current directory"); 5798 return 0; 5799 } 5800 5801 jio_snprintf(buffer, bufferSize, "%s/core or core.%d", 5802 p, current_process_id()); 5803 5804 return strlen(buffer); 5805} 5806 5807#ifndef PRODUCT 5808void TestReserveMemorySpecial_test() { 5809 // No tests available for this platform 5810} 5811#endif 5812