os_solaris.cpp revision 6402:2377269bd73d
1/*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25// no precompiled headers
26#include "classfile/classLoader.hpp"
27#include "classfile/systemDictionary.hpp"
28#include "classfile/vmSymbols.hpp"
29#include "code/icBuffer.hpp"
30#include "code/vtableStubs.hpp"
31#include "compiler/compileBroker.hpp"
32#include "compiler/disassembler.hpp"
33#include "interpreter/interpreter.hpp"
34#include "jvm_solaris.h"
35#include "memory/allocation.inline.hpp"
36#include "memory/filemap.hpp"
37#include "mutex_solaris.inline.hpp"
38#include "oops/oop.inline.hpp"
39#include "os_share_solaris.hpp"
40#include "prims/jniFastGetField.hpp"
41#include "prims/jvm.h"
42#include "prims/jvm_misc.hpp"
43#include "runtime/arguments.hpp"
44#include "runtime/extendedPC.hpp"
45#include "runtime/globals.hpp"
46#include "runtime/interfaceSupport.hpp"
47#include "runtime/java.hpp"
48#include "runtime/javaCalls.hpp"
49#include "runtime/mutexLocker.hpp"
50#include "runtime/objectMonitor.hpp"
51#include "runtime/orderAccess.inline.hpp"
52#include "runtime/osThread.hpp"
53#include "runtime/perfMemory.hpp"
54#include "runtime/sharedRuntime.hpp"
55#include "runtime/statSampler.hpp"
56#include "runtime/stubRoutines.hpp"
57#include "runtime/thread.inline.hpp"
58#include "runtime/threadCritical.hpp"
59#include "runtime/timer.hpp"
60#include "services/attachListener.hpp"
61#include "services/memTracker.hpp"
62#include "services/runtimeService.hpp"
63#include "utilities/decoder.hpp"
64#include "utilities/defaultStream.hpp"
65#include "utilities/events.hpp"
66#include "utilities/growableArray.hpp"
67#include "utilities/vmError.hpp"
68
69// put OS-includes here
70# include <dlfcn.h>
71# include <errno.h>
72# include <exception>
73# include <link.h>
74# include <poll.h>
75# include <pthread.h>
76# include <pwd.h>
77# include <schedctl.h>
78# include <setjmp.h>
79# include <signal.h>
80# include <stdio.h>
81# include <alloca.h>
82# include <sys/filio.h>
83# include <sys/ipc.h>
84# include <sys/lwp.h>
85# include <sys/machelf.h>     // for elf Sym structure used by dladdr1
86# include <sys/mman.h>
87# include <sys/processor.h>
88# include <sys/procset.h>
89# include <sys/pset.h>
90# include <sys/resource.h>
91# include <sys/shm.h>
92# include <sys/socket.h>
93# include <sys/stat.h>
94# include <sys/systeminfo.h>
95# include <sys/time.h>
96# include <sys/times.h>
97# include <sys/types.h>
98# include <sys/wait.h>
99# include <sys/utsname.h>
100# include <thread.h>
101# include <unistd.h>
102# include <sys/priocntl.h>
103# include <sys/rtpriocntl.h>
104# include <sys/tspriocntl.h>
105# include <sys/iapriocntl.h>
106# include <sys/fxpriocntl.h>
107# include <sys/loadavg.h>
108# include <string.h>
109# include <stdio.h>
110
111# define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
112# include <sys/procfs.h>     //  see comment in <sys/procfs.h>
113
114#define MAX_PATH (2 * K)
115
116// for timer info max values which include all bits
117#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
118
119
120// Here are some liblgrp types from sys/lgrp_user.h to be able to
121// compile on older systems without this header file.
122
123#ifndef MADV_ACCESS_LWP
124# define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
125#endif
126#ifndef MADV_ACCESS_MANY
127# define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
128#endif
129
130#ifndef LGRP_RSRC_CPU
131# define LGRP_RSRC_CPU           0       /* CPU resources */
132#endif
133#ifndef LGRP_RSRC_MEM
134# define LGRP_RSRC_MEM           1       /* memory resources */
135#endif
136
137// see thr_setprio(3T) for the basis of these numbers
138#define MinimumPriority 0
139#define NormalPriority  64
140#define MaximumPriority 127
141
142// Values for ThreadPriorityPolicy == 1
143int prio_policy1[CriticalPriority+1] = {
144  -99999,  0, 16,  32,  48,  64,
145          80, 96, 112, 124, 127, 127 };
146
147// System parameters used internally
148static clock_t clock_tics_per_sec = 100;
149
150// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
151static bool enabled_extended_FILE_stdio = false;
152
153// For diagnostics to print a message once. see run_periodic_checks
154static bool check_addr0_done = false;
155static sigset_t check_signal_done;
156static bool check_signals = true;
157
158address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
159address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
160
161address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
162
163
164// "default" initializers for missing libc APIs
165extern "C" {
166  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
167  static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
168
169  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
170  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
171}
172
173// "default" initializers for pthread-based synchronization
174extern "C" {
175  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
176  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
177}
178
179static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
180
181// Thread Local Storage
182// This is common to all Solaris platforms so it is defined here,
183// in this common file.
184// The declarations are in the os_cpu threadLS*.hpp files.
185//
186// Static member initialization for TLS
187Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
188
189#ifndef PRODUCT
190#define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
191
192int ThreadLocalStorage::_tcacheHit = 0;
193int ThreadLocalStorage::_tcacheMiss = 0;
194
195void ThreadLocalStorage::print_statistics() {
196  int total = _tcacheMiss+_tcacheHit;
197  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
198                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
199}
200#undef _PCT
201#endif // PRODUCT
202
203Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
204                                                        int index) {
205  Thread *thread = get_thread_slow();
206  if (thread != NULL) {
207    address sp = os::current_stack_pointer();
208    guarantee(thread->_stack_base == NULL ||
209              (sp <= thread->_stack_base &&
210                 sp >= thread->_stack_base - thread->_stack_size) ||
211               is_error_reported(),
212              "sp must be inside of selected thread stack");
213
214    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
215    _get_thread_cache[ index ] = thread;
216  }
217  return thread;
218}
219
220
221static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
222#define NO_CACHED_THREAD ((Thread*)all_zero)
223
224void ThreadLocalStorage::pd_set_thread(Thread* thread) {
225
226  // Store the new value before updating the cache to prevent a race
227  // between get_thread_via_cache_slowly() and this store operation.
228  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
229
230  // Update thread cache with new thread if setting on thread create,
231  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
232  uintptr_t raw = pd_raw_thread_id();
233  int ix = pd_cache_index(raw);
234  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
235}
236
237void ThreadLocalStorage::pd_init() {
238  for (int i = 0; i < _pd_cache_size; i++) {
239    _get_thread_cache[i] = NO_CACHED_THREAD;
240  }
241}
242
243// Invalidate all the caches (happens to be the same as pd_init).
244void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
245
246#undef NO_CACHED_THREAD
247
248// END Thread Local Storage
249
250static inline size_t adjust_stack_size(address base, size_t size) {
251  if ((ssize_t)size < 0) {
252    // 4759953: Compensate for ridiculous stack size.
253    size = max_intx;
254  }
255  if (size > (size_t)base) {
256    // 4812466: Make sure size doesn't allow the stack to wrap the address space.
257    size = (size_t)base;
258  }
259  return size;
260}
261
262static inline stack_t get_stack_info() {
263  stack_t st;
264  int retval = thr_stksegment(&st);
265  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
266  assert(retval == 0, "incorrect return value from thr_stksegment");
267  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
268  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
269  return st;
270}
271
272address os::current_stack_base() {
273  int r = thr_main() ;
274  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
275  bool is_primordial_thread = r;
276
277  // Workaround 4352906, avoid calls to thr_stksegment by
278  // thr_main after the first one (it looks like we trash
279  // some data, causing the value for ss_sp to be incorrect).
280  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
281    stack_t st = get_stack_info();
282    if (is_primordial_thread) {
283      // cache initial value of stack base
284      os::Solaris::_main_stack_base = (address)st.ss_sp;
285    }
286    return (address)st.ss_sp;
287  } else {
288    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
289    return os::Solaris::_main_stack_base;
290  }
291}
292
293size_t os::current_stack_size() {
294  size_t size;
295
296  int r = thr_main() ;
297  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
298  if(!r) {
299    size = get_stack_info().ss_size;
300  } else {
301    struct rlimit limits;
302    getrlimit(RLIMIT_STACK, &limits);
303    size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
304  }
305  // base may not be page aligned
306  address base = current_stack_base();
307  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
308  return (size_t)(base - bottom);
309}
310
311struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
312  return localtime_r(clock, res);
313}
314
315void os::Solaris::try_enable_extended_io() {
316  typedef int (*enable_extended_FILE_stdio_t)(int, int);
317
318  if (!UseExtendedFileIO) {
319    return;
320  }
321
322  enable_extended_FILE_stdio_t enabler =
323    (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
324                                         "enable_extended_FILE_stdio");
325  if (enabler) {
326    enabler(-1, -1);
327  }
328}
329
330static int _processors_online = 0;
331
332         jint os::Solaris::_os_thread_limit = 0;
333volatile jint os::Solaris::_os_thread_count = 0;
334
335julong os::available_memory() {
336  return Solaris::available_memory();
337}
338
339julong os::Solaris::available_memory() {
340  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
341}
342
343julong os::Solaris::_physical_memory = 0;
344
345julong os::physical_memory() {
346   return Solaris::physical_memory();
347}
348
349static hrtime_t first_hrtime = 0;
350static const hrtime_t hrtime_hz = 1000*1000*1000;
351static volatile hrtime_t max_hrtime = 0;
352
353
354void os::Solaris::initialize_system_info() {
355  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
356  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
357  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
358}
359
360int os::active_processor_count() {
361  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
362  pid_t pid = getpid();
363  psetid_t pset = PS_NONE;
364  // Are we running in a processor set or is there any processor set around?
365  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
366    uint_t pset_cpus;
367    // Query the number of cpus available to us.
368    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
369      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
370      _processors_online = pset_cpus;
371      return pset_cpus;
372    }
373  }
374  // Otherwise return number of online cpus
375  return online_cpus;
376}
377
378static bool find_processors_in_pset(psetid_t        pset,
379                                    processorid_t** id_array,
380                                    uint_t*         id_length) {
381  bool result = false;
382  // Find the number of processors in the processor set.
383  if (pset_info(pset, NULL, id_length, NULL) == 0) {
384    // Make up an array to hold their ids.
385    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
386    // Fill in the array with their processor ids.
387    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
388      result = true;
389    }
390  }
391  return result;
392}
393
394// Callers of find_processors_online() must tolerate imprecise results --
395// the system configuration can change asynchronously because of DR
396// or explicit psradm operations.
397//
398// We also need to take care that the loop (below) terminates as the
399// number of processors online can change between the _SC_NPROCESSORS_ONLN
400// request and the loop that builds the list of processor ids.   Unfortunately
401// there's no reliable way to determine the maximum valid processor id,
402// so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
403// man pages, which claim the processor id set is "sparse, but
404// not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
405// exit the loop.
406//
407// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
408// not available on S8.0.
409
410static bool find_processors_online(processorid_t** id_array,
411                                   uint*           id_length) {
412  const processorid_t MAX_PROCESSOR_ID = 100000 ;
413  // Find the number of processors online.
414  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
415  // Make up an array to hold their ids.
416  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
417  // Processors need not be numbered consecutively.
418  long found = 0;
419  processorid_t next = 0;
420  while (found < *id_length && next < MAX_PROCESSOR_ID) {
421    processor_info_t info;
422    if (processor_info(next, &info) == 0) {
423      // NB, PI_NOINTR processors are effectively online ...
424      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
425        (*id_array)[found] = next;
426        found += 1;
427      }
428    }
429    next += 1;
430  }
431  if (found < *id_length) {
432      // The loop above didn't identify the expected number of processors.
433      // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
434      // and re-running the loop, above, but there's no guarantee of progress
435      // if the system configuration is in flux.  Instead, we just return what
436      // we've got.  Note that in the worst case find_processors_online() could
437      // return an empty set.  (As a fall-back in the case of the empty set we
438      // could just return the ID of the current processor).
439      *id_length = found ;
440  }
441
442  return true;
443}
444
445static bool assign_distribution(processorid_t* id_array,
446                                uint           id_length,
447                                uint*          distribution,
448                                uint           distribution_length) {
449  // We assume we can assign processorid_t's to uint's.
450  assert(sizeof(processorid_t) == sizeof(uint),
451         "can't convert processorid_t to uint");
452  // Quick check to see if we won't succeed.
453  if (id_length < distribution_length) {
454    return false;
455  }
456  // Assign processor ids to the distribution.
457  // Try to shuffle processors to distribute work across boards,
458  // assuming 4 processors per board.
459  const uint processors_per_board = ProcessDistributionStride;
460  // Find the maximum processor id.
461  processorid_t max_id = 0;
462  for (uint m = 0; m < id_length; m += 1) {
463    max_id = MAX2(max_id, id_array[m]);
464  }
465  // The next id, to limit loops.
466  const processorid_t limit_id = max_id + 1;
467  // Make up markers for available processors.
468  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
469  for (uint c = 0; c < limit_id; c += 1) {
470    available_id[c] = false;
471  }
472  for (uint a = 0; a < id_length; a += 1) {
473    available_id[id_array[a]] = true;
474  }
475  // Step by "boards", then by "slot", copying to "assigned".
476  // NEEDS_CLEANUP: The assignment of processors should be stateful,
477  //                remembering which processors have been assigned by
478  //                previous calls, etc., so as to distribute several
479  //                independent calls of this method.  What we'd like is
480  //                It would be nice to have an API that let us ask
481  //                how many processes are bound to a processor,
482  //                but we don't have that, either.
483  //                In the short term, "board" is static so that
484  //                subsequent distributions don't all start at board 0.
485  static uint board = 0;
486  uint assigned = 0;
487  // Until we've found enough processors ....
488  while (assigned < distribution_length) {
489    // ... find the next available processor in the board.
490    for (uint slot = 0; slot < processors_per_board; slot += 1) {
491      uint try_id = board * processors_per_board + slot;
492      if ((try_id < limit_id) && (available_id[try_id] == true)) {
493        distribution[assigned] = try_id;
494        available_id[try_id] = false;
495        assigned += 1;
496        break;
497      }
498    }
499    board += 1;
500    if (board * processors_per_board + 0 >= limit_id) {
501      board = 0;
502    }
503  }
504  if (available_id != NULL) {
505    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
506  }
507  return true;
508}
509
510void os::set_native_thread_name(const char *name) {
511  // Not yet implemented.
512  return;
513}
514
515bool os::distribute_processes(uint length, uint* distribution) {
516  bool result = false;
517  // Find the processor id's of all the available CPUs.
518  processorid_t* id_array  = NULL;
519  uint           id_length = 0;
520  // There are some races between querying information and using it,
521  // since processor sets can change dynamically.
522  psetid_t pset = PS_NONE;
523  // Are we running in a processor set?
524  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
525    result = find_processors_in_pset(pset, &id_array, &id_length);
526  } else {
527    result = find_processors_online(&id_array, &id_length);
528  }
529  if (result == true) {
530    if (id_length >= length) {
531      result = assign_distribution(id_array, id_length, distribution, length);
532    } else {
533      result = false;
534    }
535  }
536  if (id_array != NULL) {
537    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
538  }
539  return result;
540}
541
542bool os::bind_to_processor(uint processor_id) {
543  // We assume that a processorid_t can be stored in a uint.
544  assert(sizeof(uint) == sizeof(processorid_t),
545         "can't convert uint to processorid_t");
546  int bind_result =
547    processor_bind(P_LWPID,                       // bind LWP.
548                   P_MYID,                        // bind current LWP.
549                   (processorid_t) processor_id,  // id.
550                   NULL);                         // don't return old binding.
551  return (bind_result == 0);
552}
553
554bool os::getenv(const char* name, char* buffer, int len) {
555  char* val = ::getenv( name );
556  if ( val == NULL
557  ||   strlen(val) + 1  >  len ) {
558    if (len > 0)  buffer[0] = 0; // return a null string
559    return false;
560  }
561  strcpy( buffer, val );
562  return true;
563}
564
565
566// Return true if user is running as root.
567
568bool os::have_special_privileges() {
569  static bool init = false;
570  static bool privileges = false;
571  if (!init) {
572    privileges = (getuid() != geteuid()) || (getgid() != getegid());
573    init = true;
574  }
575  return privileges;
576}
577
578
579void os::init_system_properties_values() {
580  // The next steps are taken in the product version:
581  //
582  // Obtain the JAVA_HOME value from the location of libjvm.so.
583  // This library should be located at:
584  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
585  //
586  // If "/jre/lib/" appears at the right place in the path, then we
587  // assume libjvm.so is installed in a JDK and we use this path.
588  //
589  // Otherwise exit with message: "Could not create the Java virtual machine."
590  //
591  // The following extra steps are taken in the debugging version:
592  //
593  // If "/jre/lib/" does NOT appear at the right place in the path
594  // instead of exit check for $JAVA_HOME environment variable.
595  //
596  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
597  // then we append a fake suffix "hotspot/libjvm.so" to this path so
598  // it looks like libjvm.so is installed there
599  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
600  //
601  // Otherwise exit.
602  //
603  // Important note: if the location of libjvm.so changes this
604  // code needs to be changed accordingly.
605
606// Base path of extensions installed on the system.
607#define SYS_EXT_DIR     "/usr/jdk/packages"
608#define EXTENSIONS_DIR  "/lib/ext"
609#define ENDORSED_DIR    "/lib/endorsed"
610
611  char cpu_arch[12];
612  // Buffer that fits several sprintfs.
613  // Note that the space for the colon and the trailing null are provided
614  // by the nulls included by the sizeof operator.
615  const size_t bufsize =
616    MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
617         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
618         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
619         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
620  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
621
622  // sysclasspath, java_home, dll_dir
623  {
624    char *pslash;
625    os::jvm_path(buf, bufsize);
626
627    // Found the full path to libjvm.so.
628    // Now cut the path to <java_home>/jre if we can.
629    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
630    pslash = strrchr(buf, '/');
631    if (pslash != NULL) {
632      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
633    }
634    Arguments::set_dll_dir(buf);
635
636    if (pslash != NULL) {
637      pslash = strrchr(buf, '/');
638      if (pslash != NULL) {
639        *pslash = '\0';          // Get rid of /<arch>.
640        pslash = strrchr(buf, '/');
641        if (pslash != NULL) {
642          *pslash = '\0';        // Get rid of /lib.
643        }
644      }
645    }
646    Arguments::set_java_home(buf);
647    set_boot_path('/', ':');
648  }
649
650  // Where to look for native libraries.
651  {
652    // Use dlinfo() to determine the correct java.library.path.
653    //
654    // If we're launched by the Java launcher, and the user
655    // does not set java.library.path explicitly on the commandline,
656    // the Java launcher sets LD_LIBRARY_PATH for us and unsets
657    // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
658    // dlinfo returns LD_LIBRARY_PATH + crle settings (including
659    // /usr/lib), which is exactly what we want.
660    //
661    // If the user does set java.library.path, it completely
662    // overwrites this setting, and always has.
663    //
664    // If we're not launched by the Java launcher, we may
665    // get here with any/all of the LD_LIBRARY_PATH[_32|64]
666    // settings.  Again, dlinfo does exactly what we want.
667
668    Dl_serinfo     info_sz, *info = &info_sz;
669    Dl_serpath     *path;
670    char           *library_path;
671    char           *common_path = buf;
672
673    // Determine search path count and required buffer size.
674    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
675      FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
676      vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
677    }
678
679    // Allocate new buffer and initialize.
680    info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
681    info->dls_size = info_sz.dls_size;
682    info->dls_cnt = info_sz.dls_cnt;
683
684    // Obtain search path information.
685    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
686      FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
687      FREE_C_HEAP_ARRAY(char, info, mtInternal);
688      vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
689    }
690
691    path = &info->dls_serpath[0];
692
693    // Note: Due to a legacy implementation, most of the library path
694    // is set in the launcher. This was to accomodate linking restrictions
695    // on legacy Solaris implementations (which are no longer supported).
696    // Eventually, all the library path setting will be done here.
697    //
698    // However, to prevent the proliferation of improperly built native
699    // libraries, the new path component /usr/jdk/packages is added here.
700
701    // Determine the actual CPU architecture.
702    sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
703#ifdef _LP64
704    // If we are a 64-bit vm, perform the following translations:
705    //   sparc   -> sparcv9
706    //   i386    -> amd64
707    if (strcmp(cpu_arch, "sparc") == 0) {
708      strcat(cpu_arch, "v9");
709    } else if (strcmp(cpu_arch, "i386") == 0) {
710      strcpy(cpu_arch, "amd64");
711    }
712#endif
713
714    // Construct the invariant part of ld_library_path.
715    sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
716
717    // Struct size is more than sufficient for the path components obtained
718    // through the dlinfo() call, so only add additional space for the path
719    // components explicitly added here.
720    size_t library_path_size = info->dls_size + strlen(common_path);
721    library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
722    library_path[0] = '\0';
723
724    // Construct the desired Java library path from the linker's library
725    // search path.
726    //
727    // For compatibility, it is optimal that we insert the additional path
728    // components specific to the Java VM after those components specified
729    // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
730    // infrastructure.
731    if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
732      strcpy(library_path, common_path);
733    } else {
734      int inserted = 0;
735      int i;
736      for (i = 0; i < info->dls_cnt; i++, path++) {
737        uint_t flags = path->dls_flags & LA_SER_MASK;
738        if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
739          strcat(library_path, common_path);
740          strcat(library_path, os::path_separator());
741          inserted = 1;
742        }
743        strcat(library_path, path->dls_name);
744        strcat(library_path, os::path_separator());
745      }
746      // Eliminate trailing path separator.
747      library_path[strlen(library_path)-1] = '\0';
748    }
749
750    // happens before argument parsing - can't use a trace flag
751    // tty->print_raw("init_system_properties_values: native lib path: ");
752    // tty->print_raw_cr(library_path);
753
754    // Callee copies into its own buffer.
755    Arguments::set_library_path(library_path);
756
757    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
758    FREE_C_HEAP_ARRAY(char, info, mtInternal);
759  }
760
761  // Extensions directories.
762  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
763  Arguments::set_ext_dirs(buf);
764
765  // Endorsed standards default directory.
766  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
767  Arguments::set_endorsed_dirs(buf);
768
769  FREE_C_HEAP_ARRAY(char, buf, mtInternal);
770
771#undef SYS_EXT_DIR
772#undef EXTENSIONS_DIR
773#undef ENDORSED_DIR
774}
775
776void os::breakpoint() {
777  BREAKPOINT;
778}
779
780bool os::obsolete_option(const JavaVMOption *option)
781{
782  if (!strncmp(option->optionString, "-Xt", 3)) {
783    return true;
784  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
785    return true;
786  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
787    return true;
788  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
789    return true;
790  }
791  return false;
792}
793
794bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
795  address  stackStart  = (address)thread->stack_base();
796  address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
797  if (sp < stackStart && sp >= stackEnd ) return true;
798  return false;
799}
800
801extern "C" void breakpoint() {
802  // use debugger to set breakpoint here
803}
804
805static thread_t main_thread;
806
807// Thread start routine for all new Java threads
808extern "C" void* java_start(void* thread_addr) {
809  // Try to randomize the cache line index of hot stack frames.
810  // This helps when threads of the same stack traces evict each other's
811  // cache lines. The threads can be either from the same JVM instance, or
812  // from different JVM instances. The benefit is especially true for
813  // processors with hyperthreading technology.
814  static int counter = 0;
815  int pid = os::current_process_id();
816  alloca(((pid ^ counter++) & 7) * 128);
817
818  int prio;
819  Thread* thread = (Thread*)thread_addr;
820  OSThread* osthr = thread->osthread();
821
822  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
823  thread->_schedctl = (void *) schedctl_init () ;
824
825  if (UseNUMA) {
826    int lgrp_id = os::numa_get_group_id();
827    if (lgrp_id != -1) {
828      thread->set_lgrp_id(lgrp_id);
829    }
830  }
831
832  // If the creator called set priority before we started,
833  // we need to call set_native_priority now that we have an lwp.
834  // We used to get the priority from thr_getprio (we called
835  // thr_setprio way back in create_thread) and pass it to
836  // set_native_priority, but Solaris scales the priority
837  // in java_to_os_priority, so when we read it back here,
838  // we pass trash to set_native_priority instead of what's
839  // in java_to_os_priority. So we save the native priority
840  // in the osThread and recall it here.
841
842  if ( osthr->thread_id() != -1 ) {
843    if ( UseThreadPriorities ) {
844      int prio = osthr->native_priority();
845      if (ThreadPriorityVerbose) {
846        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
847                      INTPTR_FORMAT ", setting priority: %d\n",
848                      osthr->thread_id(), osthr->lwp_id(), prio);
849      }
850      os::set_native_priority(thread, prio);
851    }
852  } else if (ThreadPriorityVerbose) {
853    warning("Can't set priority in _start routine, thread id hasn't been set\n");
854  }
855
856  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
857
858  // initialize signal mask for this thread
859  os::Solaris::hotspot_sigmask(thread);
860
861  thread->run();
862
863  // One less thread is executing
864  // When the VMThread gets here, the main thread may have already exited
865  // which frees the CodeHeap containing the Atomic::dec code
866  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
867    Atomic::dec(&os::Solaris::_os_thread_count);
868  }
869
870  if (UseDetachedThreads) {
871    thr_exit(NULL);
872    ShouldNotReachHere();
873  }
874  return NULL;
875}
876
877static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
878  // Allocate the OSThread object
879  OSThread* osthread = new OSThread(NULL, NULL);
880  if (osthread == NULL) return NULL;
881
882  // Store info on the Solaris thread into the OSThread
883  osthread->set_thread_id(thread_id);
884  osthread->set_lwp_id(_lwp_self());
885  thread->_schedctl = (void *) schedctl_init () ;
886
887  if (UseNUMA) {
888    int lgrp_id = os::numa_get_group_id();
889    if (lgrp_id != -1) {
890      thread->set_lgrp_id(lgrp_id);
891    }
892  }
893
894  if ( ThreadPriorityVerbose ) {
895    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
896                  osthread->thread_id(), osthread->lwp_id() );
897  }
898
899  // Initial thread state is INITIALIZED, not SUSPENDED
900  osthread->set_state(INITIALIZED);
901
902  return osthread;
903}
904
905void os::Solaris::hotspot_sigmask(Thread* thread) {
906
907  //Save caller's signal mask
908  sigset_t sigmask;
909  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
910  OSThread *osthread = thread->osthread();
911  osthread->set_caller_sigmask(sigmask);
912
913  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
914  if (!ReduceSignalUsage) {
915    if (thread->is_VM_thread()) {
916      // Only the VM thread handles BREAK_SIGNAL ...
917      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
918    } else {
919      // ... all other threads block BREAK_SIGNAL
920      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
921      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
922    }
923  }
924}
925
926bool os::create_attached_thread(JavaThread* thread) {
927#ifdef ASSERT
928  thread->verify_not_published();
929#endif
930  OSThread* osthread = create_os_thread(thread, thr_self());
931  if (osthread == NULL) {
932     return false;
933  }
934
935  // Initial thread state is RUNNABLE
936  osthread->set_state(RUNNABLE);
937  thread->set_osthread(osthread);
938
939  // initialize signal mask for this thread
940  // and save the caller's signal mask
941  os::Solaris::hotspot_sigmask(thread);
942
943  return true;
944}
945
946bool os::create_main_thread(JavaThread* thread) {
947#ifdef ASSERT
948  thread->verify_not_published();
949#endif
950  if (_starting_thread == NULL) {
951    _starting_thread = create_os_thread(thread, main_thread);
952     if (_starting_thread == NULL) {
953        return false;
954     }
955  }
956
957  // The primodial thread is runnable from the start
958  _starting_thread->set_state(RUNNABLE);
959
960  thread->set_osthread(_starting_thread);
961
962  // initialize signal mask for this thread
963  // and save the caller's signal mask
964  os::Solaris::hotspot_sigmask(thread);
965
966  return true;
967}
968
969
970bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
971  // Allocate the OSThread object
972  OSThread* osthread = new OSThread(NULL, NULL);
973  if (osthread == NULL) {
974    return false;
975  }
976
977  if ( ThreadPriorityVerbose ) {
978    char *thrtyp;
979    switch ( thr_type ) {
980      case vm_thread:
981        thrtyp = (char *)"vm";
982        break;
983      case cgc_thread:
984        thrtyp = (char *)"cgc";
985        break;
986      case pgc_thread:
987        thrtyp = (char *)"pgc";
988        break;
989      case java_thread:
990        thrtyp = (char *)"java";
991        break;
992      case compiler_thread:
993        thrtyp = (char *)"compiler";
994        break;
995      case watcher_thread:
996        thrtyp = (char *)"watcher";
997        break;
998      default:
999        thrtyp = (char *)"unknown";
1000        break;
1001    }
1002    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1003  }
1004
1005  // Calculate stack size if it's not specified by caller.
1006  if (stack_size == 0) {
1007    // The default stack size 1M (2M for LP64).
1008    stack_size = (BytesPerWord >> 2) * K * K;
1009
1010    switch (thr_type) {
1011    case os::java_thread:
1012      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1013      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1014      break;
1015    case os::compiler_thread:
1016      if (CompilerThreadStackSize > 0) {
1017        stack_size = (size_t)(CompilerThreadStackSize * K);
1018        break;
1019      } // else fall through:
1020        // use VMThreadStackSize if CompilerThreadStackSize is not defined
1021    case os::vm_thread:
1022    case os::pgc_thread:
1023    case os::cgc_thread:
1024    case os::watcher_thread:
1025      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1026      break;
1027    }
1028  }
1029  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1030
1031  // Initial state is ALLOCATED but not INITIALIZED
1032  osthread->set_state(ALLOCATED);
1033
1034  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1035    // We got lots of threads. Check if we still have some address space left.
1036    // Need to be at least 5Mb of unreserved address space. We do check by
1037    // trying to reserve some.
1038    const size_t VirtualMemoryBangSize = 20*K*K;
1039    char* mem = os::reserve_memory(VirtualMemoryBangSize);
1040    if (mem == NULL) {
1041      delete osthread;
1042      return false;
1043    } else {
1044      // Release the memory again
1045      os::release_memory(mem, VirtualMemoryBangSize);
1046    }
1047  }
1048
1049  // Setup osthread because the child thread may need it.
1050  thread->set_osthread(osthread);
1051
1052  // Create the Solaris thread
1053  thread_t tid = 0;
1054  long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1055  int      status;
1056
1057  // Mark that we don't have an lwp or thread id yet.
1058  // In case we attempt to set the priority before the thread starts.
1059  osthread->set_lwp_id(-1);
1060  osthread->set_thread_id(-1);
1061
1062  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1063  if (status != 0) {
1064    if (PrintMiscellaneous && (Verbose || WizardMode)) {
1065      perror("os::create_thread");
1066    }
1067    thread->set_osthread(NULL);
1068    // Need to clean up stuff we've allocated so far
1069    delete osthread;
1070    return false;
1071  }
1072
1073  Atomic::inc(&os::Solaris::_os_thread_count);
1074
1075  // Store info on the Solaris thread into the OSThread
1076  osthread->set_thread_id(tid);
1077
1078  // Remember that we created this thread so we can set priority on it
1079  osthread->set_vm_created();
1080
1081  // Initial thread state is INITIALIZED, not SUSPENDED
1082  osthread->set_state(INITIALIZED);
1083
1084  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1085  return true;
1086}
1087
1088/* defined for >= Solaris 10. This allows builds on earlier versions
1089 *  of Solaris to take advantage of the newly reserved Solaris JVM signals
1090 *  With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
1091 *  and -XX:+UseAltSigs does nothing since these should have no conflict
1092 */
1093#if !defined(SIGJVM1)
1094#define SIGJVM1 39
1095#define SIGJVM2 40
1096#endif
1097
1098debug_only(static bool signal_sets_initialized = false);
1099static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1100int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1101int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1102
1103bool os::Solaris::is_sig_ignored(int sig) {
1104      struct sigaction oact;
1105      sigaction(sig, (struct sigaction*)NULL, &oact);
1106      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1107                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1108      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1109           return true;
1110      else
1111           return false;
1112}
1113
1114// Note: SIGRTMIN is a macro that calls sysconf() so it will
1115// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1116static bool isJVM1available() {
1117  return SIGJVM1 < SIGRTMIN;
1118}
1119
1120void os::Solaris::signal_sets_init() {
1121  // Should also have an assertion stating we are still single-threaded.
1122  assert(!signal_sets_initialized, "Already initialized");
1123  // Fill in signals that are necessarily unblocked for all threads in
1124  // the VM. Currently, we unblock the following signals:
1125  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1126  //                         by -Xrs (=ReduceSignalUsage));
1127  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1128  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1129  // the dispositions or masks wrt these signals.
1130  // Programs embedding the VM that want to use the above signals for their
1131  // own purposes must, at this time, use the "-Xrs" option to prevent
1132  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1133  // (See bug 4345157, and other related bugs).
1134  // In reality, though, unblocking these signals is really a nop, since
1135  // these signals are not blocked by default.
1136  sigemptyset(&unblocked_sigs);
1137  sigemptyset(&allowdebug_blocked_sigs);
1138  sigaddset(&unblocked_sigs, SIGILL);
1139  sigaddset(&unblocked_sigs, SIGSEGV);
1140  sigaddset(&unblocked_sigs, SIGBUS);
1141  sigaddset(&unblocked_sigs, SIGFPE);
1142
1143  if (isJVM1available) {
1144    os::Solaris::set_SIGinterrupt(SIGJVM1);
1145    os::Solaris::set_SIGasync(SIGJVM2);
1146  } else if (UseAltSigs) {
1147    os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1148    os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1149  } else {
1150    os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1151    os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1152  }
1153
1154  sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1155  sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1156
1157  if (!ReduceSignalUsage) {
1158   if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1159      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1160      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1161   }
1162   if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1163      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1164      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1165   }
1166   if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1167      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1168      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1169   }
1170  }
1171  // Fill in signals that are blocked by all but the VM thread.
1172  sigemptyset(&vm_sigs);
1173  if (!ReduceSignalUsage)
1174    sigaddset(&vm_sigs, BREAK_SIGNAL);
1175  debug_only(signal_sets_initialized = true);
1176
1177  // For diagnostics only used in run_periodic_checks
1178  sigemptyset(&check_signal_done);
1179}
1180
1181// These are signals that are unblocked while a thread is running Java.
1182// (For some reason, they get blocked by default.)
1183sigset_t* os::Solaris::unblocked_signals() {
1184  assert(signal_sets_initialized, "Not initialized");
1185  return &unblocked_sigs;
1186}
1187
1188// These are the signals that are blocked while a (non-VM) thread is
1189// running Java. Only the VM thread handles these signals.
1190sigset_t* os::Solaris::vm_signals() {
1191  assert(signal_sets_initialized, "Not initialized");
1192  return &vm_sigs;
1193}
1194
1195// These are signals that are blocked during cond_wait to allow debugger in
1196sigset_t* os::Solaris::allowdebug_blocked_signals() {
1197  assert(signal_sets_initialized, "Not initialized");
1198  return &allowdebug_blocked_sigs;
1199}
1200
1201
1202void _handle_uncaught_cxx_exception() {
1203  VMError err("An uncaught C++ exception");
1204  err.report_and_die();
1205}
1206
1207
1208// First crack at OS-specific initialization, from inside the new thread.
1209void os::initialize_thread(Thread* thr) {
1210  int r = thr_main() ;
1211  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1212  if (r) {
1213    JavaThread* jt = (JavaThread *)thr;
1214    assert(jt != NULL,"Sanity check");
1215    size_t stack_size;
1216    address base = jt->stack_base();
1217    if (Arguments::created_by_java_launcher()) {
1218      // Use 2MB to allow for Solaris 7 64 bit mode.
1219      stack_size = JavaThread::stack_size_at_create() == 0
1220        ? 2048*K : JavaThread::stack_size_at_create();
1221
1222      // There are rare cases when we may have already used more than
1223      // the basic stack size allotment before this method is invoked.
1224      // Attempt to allow for a normally sized java_stack.
1225      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1226      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1227    } else {
1228      // 6269555: If we were not created by a Java launcher, i.e. if we are
1229      // running embedded in a native application, treat the primordial thread
1230      // as much like a native attached thread as possible.  This means using
1231      // the current stack size from thr_stksegment(), unless it is too large
1232      // to reliably setup guard pages.  A reasonable max size is 8MB.
1233      size_t current_size = current_stack_size();
1234      // This should never happen, but just in case....
1235      if (current_size == 0) current_size = 2 * K * K;
1236      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1237    }
1238    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
1239    stack_size = (size_t)(base - bottom);
1240
1241    assert(stack_size > 0, "Stack size calculation problem");
1242
1243    if (stack_size > jt->stack_size()) {
1244      NOT_PRODUCT(
1245        struct rlimit limits;
1246        getrlimit(RLIMIT_STACK, &limits);
1247        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1248        assert(size >= jt->stack_size(), "Stack size problem in main thread");
1249      )
1250      tty->print_cr(
1251        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1252        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1253        "See limit(1) to increase the stack size limit.",
1254        stack_size / K, jt->stack_size() / K);
1255      vm_exit(1);
1256    }
1257    assert(jt->stack_size() >= stack_size,
1258          "Attempt to map more stack than was allocated");
1259    jt->set_stack_size(stack_size);
1260  }
1261
1262  // With the T2 libthread (T1 is no longer supported) threads are always bound
1263  // and we use stackbanging in all cases.
1264
1265  os::Solaris::init_thread_fpu_state();
1266  std::set_terminate(_handle_uncaught_cxx_exception);
1267}
1268
1269
1270
1271// Free Solaris resources related to the OSThread
1272void os::free_thread(OSThread* osthread) {
1273  assert(osthread != NULL, "os::free_thread but osthread not set");
1274
1275
1276  // We are told to free resources of the argument thread,
1277  // but we can only really operate on the current thread.
1278  // The main thread must take the VMThread down synchronously
1279  // before the main thread exits and frees up CodeHeap
1280  guarantee((Thread::current()->osthread() == osthread
1281     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1282  if (Thread::current()->osthread() == osthread) {
1283    // Restore caller's signal mask
1284    sigset_t sigmask = osthread->caller_sigmask();
1285    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1286  }
1287  delete osthread;
1288}
1289
1290void os::pd_start_thread(Thread* thread) {
1291  int status = thr_continue(thread->osthread()->thread_id());
1292  assert_status(status == 0, status, "thr_continue failed");
1293}
1294
1295
1296intx os::current_thread_id() {
1297  return (intx)thr_self();
1298}
1299
1300static pid_t _initial_pid = 0;
1301
1302int os::current_process_id() {
1303  return (int)(_initial_pid ? _initial_pid : getpid());
1304}
1305
1306int os::allocate_thread_local_storage() {
1307  // %%%       in Win32 this allocates a memory segment pointed to by a
1308  //           register.  Dan Stein can implement a similar feature in
1309  //           Solaris.  Alternatively, the VM can do the same thing
1310  //           explicitly: malloc some storage and keep the pointer in a
1311  //           register (which is part of the thread's context) (or keep it
1312  //           in TLS).
1313  // %%%       In current versions of Solaris, thr_self and TSD can
1314  //           be accessed via short sequences of displaced indirections.
1315  //           The value of thr_self is available as %g7(36).
1316  //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1317  //           assuming that the current thread already has a value bound to k.
1318  //           It may be worth experimenting with such access patterns,
1319  //           and later having the parameters formally exported from a Solaris
1320  //           interface.  I think, however, that it will be faster to
1321  //           maintain the invariant that %g2 always contains the
1322  //           JavaThread in Java code, and have stubs simply
1323  //           treat %g2 as a caller-save register, preserving it in a %lN.
1324  thread_key_t tk;
1325  if (thr_keycreate( &tk, NULL ) )
1326    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1327                  "(%s)", strerror(errno)));
1328  return int(tk);
1329}
1330
1331void os::free_thread_local_storage(int index) {
1332  // %%% don't think we need anything here
1333  // if ( pthread_key_delete((pthread_key_t) tk) )
1334  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1335}
1336
1337#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
1338                      // small number - point is NO swap space available
1339void os::thread_local_storage_at_put(int index, void* value) {
1340  // %%% this is used only in threadLocalStorage.cpp
1341  if (thr_setspecific((thread_key_t)index, value)) {
1342    if (errno == ENOMEM) {
1343       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1344                             "thr_setspecific: out of swap space");
1345    } else {
1346      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1347                    "(%s)", strerror(errno)));
1348    }
1349  } else {
1350      ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1351  }
1352}
1353
1354// This function could be called before TLS is initialized, for example, when
1355// VM receives an async signal or when VM causes a fatal error during
1356// initialization. Return NULL if thr_getspecific() fails.
1357void* os::thread_local_storage_at(int index) {
1358  // %%% this is used only in threadLocalStorage.cpp
1359  void* r = NULL;
1360  return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1361}
1362
1363
1364// gethrtime() should be monotonic according to the documentation,
1365// but some virtualized platforms are known to break this guarantee.
1366// getTimeNanos() must be guaranteed not to move backwards, so we
1367// are forced to add a check here.
1368inline hrtime_t getTimeNanos() {
1369  const hrtime_t now = gethrtime();
1370  const hrtime_t prev = max_hrtime;
1371  if (now <= prev) {
1372    return prev;   // same or retrograde time;
1373  }
1374  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1375  assert(obsv >= prev, "invariant");   // Monotonicity
1376  // If the CAS succeeded then we're done and return "now".
1377  // If the CAS failed and the observed value "obsv" is >= now then
1378  // we should return "obsv".  If the CAS failed and now > obsv > prv then
1379  // some other thread raced this thread and installed a new value, in which case
1380  // we could either (a) retry the entire operation, (b) retry trying to install now
1381  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1382  // we might discard a higher "now" value in deference to a slightly lower but freshly
1383  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1384  // to (a) or (b) -- and greatly reduces coherence traffic.
1385  // We might also condition (c) on the magnitude of the delta between obsv and now.
1386  // Avoiding excessive CAS operations to hot RW locations is critical.
1387  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1388  return (prev == obsv) ? now : obsv;
1389}
1390
1391// Time since start-up in seconds to a fine granularity.
1392// Used by VMSelfDestructTimer and the MemProfiler.
1393double os::elapsedTime() {
1394  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1395}
1396
1397jlong os::elapsed_counter() {
1398  return (jlong)(getTimeNanos() - first_hrtime);
1399}
1400
1401jlong os::elapsed_frequency() {
1402   return hrtime_hz;
1403}
1404
1405// Return the real, user, and system times in seconds from an
1406// arbitrary fixed point in the past.
1407bool os::getTimesSecs(double* process_real_time,
1408                  double* process_user_time,
1409                  double* process_system_time) {
1410  struct tms ticks;
1411  clock_t real_ticks = times(&ticks);
1412
1413  if (real_ticks == (clock_t) (-1)) {
1414    return false;
1415  } else {
1416    double ticks_per_second = (double) clock_tics_per_sec;
1417    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1418    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1419    // For consistency return the real time from getTimeNanos()
1420    // converted to seconds.
1421    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1422
1423    return true;
1424  }
1425}
1426
1427bool os::supports_vtime() { return true; }
1428
1429bool os::enable_vtime() {
1430  int fd = ::open("/proc/self/ctl", O_WRONLY);
1431  if (fd == -1)
1432    return false;
1433
1434  long cmd[] = { PCSET, PR_MSACCT };
1435  int res = ::write(fd, cmd, sizeof(long) * 2);
1436  ::close(fd);
1437  if (res != sizeof(long) * 2)
1438    return false;
1439
1440  return true;
1441}
1442
1443bool os::vtime_enabled() {
1444  int fd = ::open("/proc/self/status", O_RDONLY);
1445  if (fd == -1)
1446    return false;
1447
1448  pstatus_t status;
1449  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1450  ::close(fd);
1451  if (res != sizeof(pstatus_t))
1452    return false;
1453
1454  return status.pr_flags & PR_MSACCT;
1455}
1456
1457double os::elapsedVTime() {
1458  return (double)gethrvtime() / (double)hrtime_hz;
1459}
1460
1461// Used internally for comparisons only
1462// getTimeMillis guaranteed to not move backwards on Solaris
1463jlong getTimeMillis() {
1464  jlong nanotime = getTimeNanos();
1465  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1466}
1467
1468// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1469jlong os::javaTimeMillis() {
1470  timeval t;
1471  if (gettimeofday( &t, NULL) == -1)
1472    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1473  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1474}
1475
1476jlong os::javaTimeNanos() {
1477  return (jlong)getTimeNanos();
1478}
1479
1480void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1481  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1482  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1483  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1484  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1485}
1486
1487char * os::local_time_string(char *buf, size_t buflen) {
1488  struct tm t;
1489  time_t long_time;
1490  time(&long_time);
1491  localtime_r(&long_time, &t);
1492  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1493               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1494               t.tm_hour, t.tm_min, t.tm_sec);
1495  return buf;
1496}
1497
1498// Note: os::shutdown() might be called very early during initialization, or
1499// called from signal handler. Before adding something to os::shutdown(), make
1500// sure it is async-safe and can handle partially initialized VM.
1501void os::shutdown() {
1502
1503  // allow PerfMemory to attempt cleanup of any persistent resources
1504  perfMemory_exit();
1505
1506  // needs to remove object in file system
1507  AttachListener::abort();
1508
1509  // flush buffered output, finish log files
1510  ostream_abort();
1511
1512  // Check for abort hook
1513  abort_hook_t abort_hook = Arguments::abort_hook();
1514  if (abort_hook != NULL) {
1515    abort_hook();
1516  }
1517}
1518
1519// Note: os::abort() might be called very early during initialization, or
1520// called from signal handler. Before adding something to os::abort(), make
1521// sure it is async-safe and can handle partially initialized VM.
1522void os::abort(bool dump_core) {
1523  os::shutdown();
1524  if (dump_core) {
1525#ifndef PRODUCT
1526    fdStream out(defaultStream::output_fd());
1527    out.print_raw("Current thread is ");
1528    char buf[16];
1529    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1530    out.print_raw_cr(buf);
1531    out.print_raw_cr("Dumping core ...");
1532#endif
1533    ::abort(); // dump core (for debugging)
1534  }
1535
1536  ::exit(1);
1537}
1538
1539// Die immediately, no exit hook, no abort hook, no cleanup.
1540void os::die() {
1541  ::abort(); // dump core (for debugging)
1542}
1543
1544// unused
1545void os::set_error_file(const char *logfile) {}
1546
1547// DLL functions
1548
1549const char* os::dll_file_extension() { return ".so"; }
1550
1551// This must be hard coded because it's the system's temporary
1552// directory not the java application's temp directory, ala java.io.tmpdir.
1553const char* os::get_temp_directory() { return "/tmp"; }
1554
1555static bool file_exists(const char* filename) {
1556  struct stat statbuf;
1557  if (filename == NULL || strlen(filename) == 0) {
1558    return false;
1559  }
1560  return os::stat(filename, &statbuf) == 0;
1561}
1562
1563bool os::dll_build_name(char* buffer, size_t buflen,
1564                        const char* pname, const char* fname) {
1565  bool retval = false;
1566  const size_t pnamelen = pname ? strlen(pname) : 0;
1567
1568  // Return error on buffer overflow.
1569  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1570    return retval;
1571  }
1572
1573  if (pnamelen == 0) {
1574    snprintf(buffer, buflen, "lib%s.so", fname);
1575    retval = true;
1576  } else if (strchr(pname, *os::path_separator()) != NULL) {
1577    int n;
1578    char** pelements = split_path(pname, &n);
1579    if (pelements == NULL) {
1580      return false;
1581    }
1582    for (int i = 0 ; i < n ; i++) {
1583      // really shouldn't be NULL but what the heck, check can't hurt
1584      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1585        continue; // skip the empty path values
1586      }
1587      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1588      if (file_exists(buffer)) {
1589        retval = true;
1590        break;
1591      }
1592    }
1593    // release the storage
1594    for (int i = 0 ; i < n ; i++) {
1595      if (pelements[i] != NULL) {
1596        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1597      }
1598    }
1599    if (pelements != NULL) {
1600      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1601    }
1602  } else {
1603    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1604    retval = true;
1605  }
1606  return retval;
1607}
1608
1609// check if addr is inside libjvm.so
1610bool os::address_is_in_vm(address addr) {
1611  static address libjvm_base_addr;
1612  Dl_info dlinfo;
1613
1614  if (libjvm_base_addr == NULL) {
1615    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1616      libjvm_base_addr = (address)dlinfo.dli_fbase;
1617    }
1618    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1619  }
1620
1621  if (dladdr((void *)addr, &dlinfo) != 0) {
1622    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1623  }
1624
1625  return false;
1626}
1627
1628typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1629static dladdr1_func_type dladdr1_func = NULL;
1630
1631bool os::dll_address_to_function_name(address addr, char *buf,
1632                                      int buflen, int * offset) {
1633  // buf is not optional, but offset is optional
1634  assert(buf != NULL, "sanity check");
1635
1636  Dl_info dlinfo;
1637
1638  // dladdr1_func was initialized in os::init()
1639  if (dladdr1_func != NULL) {
1640    // yes, we have dladdr1
1641
1642    // Support for dladdr1 is checked at runtime; it may be
1643    // available even if the vm is built on a machine that does
1644    // not have dladdr1 support.  Make sure there is a value for
1645    // RTLD_DL_SYMENT.
1646    #ifndef RTLD_DL_SYMENT
1647    #define RTLD_DL_SYMENT 1
1648    #endif
1649#ifdef _LP64
1650    Elf64_Sym * info;
1651#else
1652    Elf32_Sym * info;
1653#endif
1654    if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1655                     RTLD_DL_SYMENT) != 0) {
1656      // see if we have a matching symbol that covers our address
1657      if (dlinfo.dli_saddr != NULL &&
1658          (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1659        if (dlinfo.dli_sname != NULL) {
1660          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1661            jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1662          }
1663          if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1664          return true;
1665        }
1666      }
1667      // no matching symbol so try for just file info
1668      if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1669        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1670                            buf, buflen, offset, dlinfo.dli_fname)) {
1671          return true;
1672        }
1673      }
1674    }
1675    buf[0] = '\0';
1676    if (offset != NULL) *offset  = -1;
1677    return false;
1678  }
1679
1680  // no, only dladdr is available
1681  if (dladdr((void *)addr, &dlinfo) != 0) {
1682    // see if we have a matching symbol
1683    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1684      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1685        jio_snprintf(buf, buflen, dlinfo.dli_sname);
1686      }
1687      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1688      return true;
1689    }
1690    // no matching symbol so try for just file info
1691    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1692      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1693                          buf, buflen, offset, dlinfo.dli_fname)) {
1694        return true;
1695      }
1696    }
1697  }
1698  buf[0] = '\0';
1699  if (offset != NULL) *offset  = -1;
1700  return false;
1701}
1702
1703bool os::dll_address_to_library_name(address addr, char* buf,
1704                                     int buflen, int* offset) {
1705  // buf is not optional, but offset is optional
1706  assert(buf != NULL, "sanity check");
1707
1708  Dl_info dlinfo;
1709
1710  if (dladdr((void*)addr, &dlinfo) != 0) {
1711    if (dlinfo.dli_fname != NULL) {
1712      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1713    }
1714    if (dlinfo.dli_fbase != NULL && offset != NULL) {
1715      *offset = addr - (address)dlinfo.dli_fbase;
1716    }
1717    return true;
1718  }
1719
1720  buf[0] = '\0';
1721  if (offset) *offset = -1;
1722  return false;
1723}
1724
1725// Prints the names and full paths of all opened dynamic libraries
1726// for current process
1727void os::print_dll_info(outputStream * st) {
1728  Dl_info dli;
1729  void *handle;
1730  Link_map *map;
1731  Link_map *p;
1732
1733  st->print_cr("Dynamic libraries:"); st->flush();
1734
1735  if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1736      dli.dli_fname == NULL) {
1737    st->print_cr("Error: Cannot print dynamic libraries.");
1738    return;
1739  }
1740  handle = dlopen(dli.dli_fname, RTLD_LAZY);
1741  if (handle == NULL) {
1742    st->print_cr("Error: Cannot print dynamic libraries.");
1743    return;
1744  }
1745  dlinfo(handle, RTLD_DI_LINKMAP, &map);
1746  if (map == NULL) {
1747    st->print_cr("Error: Cannot print dynamic libraries.");
1748    return;
1749  }
1750
1751  while (map->l_prev != NULL)
1752    map = map->l_prev;
1753
1754  while (map != NULL) {
1755    st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1756    map = map->l_next;
1757  }
1758
1759  dlclose(handle);
1760}
1761
1762  // Loads .dll/.so and
1763  // in case of error it checks if .dll/.so was built for the
1764  // same architecture as Hotspot is running on
1765
1766void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1767{
1768  void * result= ::dlopen(filename, RTLD_LAZY);
1769  if (result != NULL) {
1770    // Successful loading
1771    return result;
1772  }
1773
1774  Elf32_Ehdr elf_head;
1775
1776  // Read system error message into ebuf
1777  // It may or may not be overwritten below
1778  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1779  ebuf[ebuflen-1]='\0';
1780  int diag_msg_max_length=ebuflen-strlen(ebuf);
1781  char* diag_msg_buf=ebuf+strlen(ebuf);
1782
1783  if (diag_msg_max_length==0) {
1784    // No more space in ebuf for additional diagnostics message
1785    return NULL;
1786  }
1787
1788
1789  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1790
1791  if (file_descriptor < 0) {
1792    // Can't open library, report dlerror() message
1793    return NULL;
1794  }
1795
1796  bool failed_to_read_elf_head=
1797    (sizeof(elf_head)!=
1798        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1799
1800  ::close(file_descriptor);
1801  if (failed_to_read_elf_head) {
1802    // file i/o error - report dlerror() msg
1803    return NULL;
1804  }
1805
1806  typedef struct {
1807    Elf32_Half  code;         // Actual value as defined in elf.h
1808    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
1809    char        elf_class;    // 32 or 64 bit
1810    char        endianess;    // MSB or LSB
1811    char*       name;         // String representation
1812  } arch_t;
1813
1814  static const arch_t arch_array[]={
1815    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1816    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1817    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1818    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1819    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1820    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1821    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1822    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1823    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1824    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1825  };
1826
1827  #if  (defined IA32)
1828    static  Elf32_Half running_arch_code=EM_386;
1829  #elif   (defined AMD64)
1830    static  Elf32_Half running_arch_code=EM_X86_64;
1831  #elif  (defined IA64)
1832    static  Elf32_Half running_arch_code=EM_IA_64;
1833  #elif  (defined __sparc) && (defined _LP64)
1834    static  Elf32_Half running_arch_code=EM_SPARCV9;
1835  #elif  (defined __sparc) && (!defined _LP64)
1836    static  Elf32_Half running_arch_code=EM_SPARC;
1837  #elif  (defined __powerpc64__)
1838    static  Elf32_Half running_arch_code=EM_PPC64;
1839  #elif  (defined __powerpc__)
1840    static  Elf32_Half running_arch_code=EM_PPC;
1841  #elif (defined ARM)
1842    static  Elf32_Half running_arch_code=EM_ARM;
1843  #else
1844    #error Method os::dll_load requires that one of following is defined:\
1845         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
1846  #endif
1847
1848  // Identify compatability class for VM's architecture and library's architecture
1849  // Obtain string descriptions for architectures
1850
1851  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1852  int running_arch_index=-1;
1853
1854  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1855    if (running_arch_code == arch_array[i].code) {
1856      running_arch_index    = i;
1857    }
1858    if (lib_arch.code == arch_array[i].code) {
1859      lib_arch.compat_class = arch_array[i].compat_class;
1860      lib_arch.name         = arch_array[i].name;
1861    }
1862  }
1863
1864  assert(running_arch_index != -1,
1865    "Didn't find running architecture code (running_arch_code) in arch_array");
1866  if (running_arch_index == -1) {
1867    // Even though running architecture detection failed
1868    // we may still continue with reporting dlerror() message
1869    return NULL;
1870  }
1871
1872  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1873    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1874    return NULL;
1875  }
1876
1877  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1878    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1879    return NULL;
1880  }
1881
1882  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1883    if ( lib_arch.name!=NULL ) {
1884      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1885        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1886        lib_arch.name, arch_array[running_arch_index].name);
1887    } else {
1888      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1889      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1890        lib_arch.code,
1891        arch_array[running_arch_index].name);
1892    }
1893  }
1894
1895  return NULL;
1896}
1897
1898void* os::dll_lookup(void* handle, const char* name) {
1899  return dlsym(handle, name);
1900}
1901
1902void* os::get_default_process_handle() {
1903  return (void*)::dlopen(NULL, RTLD_LAZY);
1904}
1905
1906int os::stat(const char *path, struct stat *sbuf) {
1907  char pathbuf[MAX_PATH];
1908  if (strlen(path) > MAX_PATH - 1) {
1909    errno = ENAMETOOLONG;
1910    return -1;
1911  }
1912  os::native_path(strcpy(pathbuf, path));
1913  return ::stat(pathbuf, sbuf);
1914}
1915
1916static bool _print_ascii_file(const char* filename, outputStream* st) {
1917  int fd = ::open(filename, O_RDONLY);
1918  if (fd == -1) {
1919     return false;
1920  }
1921
1922  char buf[32];
1923  int bytes;
1924  while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1925    st->print_raw(buf, bytes);
1926  }
1927
1928  ::close(fd);
1929
1930  return true;
1931}
1932
1933void os::print_os_info_brief(outputStream* st) {
1934  os::Solaris::print_distro_info(st);
1935
1936  os::Posix::print_uname_info(st);
1937
1938  os::Solaris::print_libversion_info(st);
1939}
1940
1941void os::print_os_info(outputStream* st) {
1942  st->print("OS:");
1943
1944  os::Solaris::print_distro_info(st);
1945
1946  os::Posix::print_uname_info(st);
1947
1948  os::Solaris::print_libversion_info(st);
1949
1950  os::Posix::print_rlimit_info(st);
1951
1952  os::Posix::print_load_average(st);
1953}
1954
1955void os::Solaris::print_distro_info(outputStream* st) {
1956  if (!_print_ascii_file("/etc/release", st)) {
1957      st->print("Solaris");
1958    }
1959    st->cr();
1960}
1961
1962void os::Solaris::print_libversion_info(outputStream* st) {
1963  st->print("  (T2 libthread)");
1964  st->cr();
1965}
1966
1967static bool check_addr0(outputStream* st) {
1968  jboolean status = false;
1969  int fd = ::open("/proc/self/map",O_RDONLY);
1970  if (fd >= 0) {
1971    prmap_t p;
1972    while(::read(fd, &p, sizeof(p)) > 0) {
1973      if (p.pr_vaddr == 0x0) {
1974        st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
1975        st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1976        st->print("Access:");
1977        st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1978        st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1979        st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1980        st->cr();
1981        status = true;
1982      }
1983    }
1984    ::close(fd);
1985  }
1986  return status;
1987}
1988
1989void os::pd_print_cpu_info(outputStream* st) {
1990  // Nothing to do for now.
1991}
1992
1993void os::print_memory_info(outputStream* st) {
1994  st->print("Memory:");
1995  st->print(" %dk page", os::vm_page_size()>>10);
1996  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
1997  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1998  st->cr();
1999  (void) check_addr0(st);
2000}
2001
2002void os::print_siginfo(outputStream* st, void* siginfo) {
2003  const siginfo_t* si = (const siginfo_t*)siginfo;
2004
2005  os::Posix::print_siginfo_brief(st, si);
2006
2007  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2008      UseSharedSpaces) {
2009    FileMapInfo* mapinfo = FileMapInfo::current_info();
2010    if (mapinfo->is_in_shared_space(si->si_addr)) {
2011      st->print("\n\nError accessing class data sharing archive."   \
2012                " Mapped file inaccessible during execution, "      \
2013                " possible disk/network problem.");
2014    }
2015  }
2016  st->cr();
2017}
2018
2019// Moved from whole group, because we need them here for diagnostic
2020// prints.
2021#define OLDMAXSIGNUM 32
2022static int Maxsignum = 0;
2023static int *ourSigFlags = NULL;
2024
2025extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2026
2027int os::Solaris::get_our_sigflags(int sig) {
2028  assert(ourSigFlags!=NULL, "signal data structure not initialized");
2029  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2030  return ourSigFlags[sig];
2031}
2032
2033void os::Solaris::set_our_sigflags(int sig, int flags) {
2034  assert(ourSigFlags!=NULL, "signal data structure not initialized");
2035  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2036  ourSigFlags[sig] = flags;
2037}
2038
2039
2040static const char* get_signal_handler_name(address handler,
2041                                           char* buf, int buflen) {
2042  int offset;
2043  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2044  if (found) {
2045    // skip directory names
2046    const char *p1, *p2;
2047    p1 = buf;
2048    size_t len = strlen(os::file_separator());
2049    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2050    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2051  } else {
2052    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2053  }
2054  return buf;
2055}
2056
2057static void print_signal_handler(outputStream* st, int sig,
2058                                  char* buf, size_t buflen) {
2059  struct sigaction sa;
2060
2061  sigaction(sig, NULL, &sa);
2062
2063  st->print("%s: ", os::exception_name(sig, buf, buflen));
2064
2065  address handler = (sa.sa_flags & SA_SIGINFO)
2066                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2067                  : CAST_FROM_FN_PTR(address, sa.sa_handler);
2068
2069  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2070    st->print("SIG_DFL");
2071  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2072    st->print("SIG_IGN");
2073  } else {
2074    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2075  }
2076
2077  st->print(", sa_mask[0]=");
2078  os::Posix::print_signal_set_short(st, &sa.sa_mask);
2079
2080  address rh = VMError::get_resetted_sighandler(sig);
2081  // May be, handler was resetted by VMError?
2082  if(rh != NULL) {
2083    handler = rh;
2084    sa.sa_flags = VMError::get_resetted_sigflags(sig);
2085  }
2086
2087  st->print(", sa_flags=");
2088  os::Posix::print_sa_flags(st, sa.sa_flags);
2089
2090  // Check: is it our handler?
2091  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2092     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2093    // It is our signal handler
2094    // check for flags
2095    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2096      st->print(
2097        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
2098        os::Solaris::get_our_sigflags(sig));
2099    }
2100  }
2101  st->cr();
2102}
2103
2104void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2105  st->print_cr("Signal Handlers:");
2106  print_signal_handler(st, SIGSEGV, buf, buflen);
2107  print_signal_handler(st, SIGBUS , buf, buflen);
2108  print_signal_handler(st, SIGFPE , buf, buflen);
2109  print_signal_handler(st, SIGPIPE, buf, buflen);
2110  print_signal_handler(st, SIGXFSZ, buf, buflen);
2111  print_signal_handler(st, SIGILL , buf, buflen);
2112  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2113  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2114  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2115  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2116  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2117  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2118  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2119  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2120}
2121
2122static char saved_jvm_path[MAXPATHLEN] = { 0 };
2123
2124// Find the full path to the current module, libjvm.so
2125void os::jvm_path(char *buf, jint buflen) {
2126  // Error checking.
2127  if (buflen < MAXPATHLEN) {
2128    assert(false, "must use a large-enough buffer");
2129    buf[0] = '\0';
2130    return;
2131  }
2132  // Lazy resolve the path to current module.
2133  if (saved_jvm_path[0] != 0) {
2134    strcpy(buf, saved_jvm_path);
2135    return;
2136  }
2137
2138  Dl_info dlinfo;
2139  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2140  assert(ret != 0, "cannot locate libjvm");
2141  if (ret != 0 && dlinfo.dli_fname != NULL) {
2142    realpath((char *)dlinfo.dli_fname, buf);
2143  } else {
2144    buf[0] = '\0';
2145    return;
2146  }
2147
2148  if (Arguments::sun_java_launcher_is_altjvm()) {
2149    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2150    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2151    // If "/jre/lib/" appears at the right place in the string, then
2152    // assume we are installed in a JDK and we're done.  Otherwise, check
2153    // for a JAVA_HOME environment variable and fix up the path so it
2154    // looks like libjvm.so is installed there (append a fake suffix
2155    // hotspot/libjvm.so).
2156    const char *p = buf + strlen(buf) - 1;
2157    for (int count = 0; p > buf && count < 5; ++count) {
2158      for (--p; p > buf && *p != '/'; --p)
2159        /* empty */ ;
2160    }
2161
2162    if (strncmp(p, "/jre/lib/", 9) != 0) {
2163      // Look for JAVA_HOME in the environment.
2164      char* java_home_var = ::getenv("JAVA_HOME");
2165      if (java_home_var != NULL && java_home_var[0] != 0) {
2166        char cpu_arch[12];
2167        char* jrelib_p;
2168        int   len;
2169        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2170#ifdef _LP64
2171        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2172        if (strcmp(cpu_arch, "sparc") == 0) {
2173          strcat(cpu_arch, "v9");
2174        } else if (strcmp(cpu_arch, "i386") == 0) {
2175          strcpy(cpu_arch, "amd64");
2176        }
2177#endif
2178        // Check the current module name "libjvm.so".
2179        p = strrchr(buf, '/');
2180        assert(strstr(p, "/libjvm") == p, "invalid library name");
2181
2182        realpath(java_home_var, buf);
2183        // determine if this is a legacy image or modules image
2184        // modules image doesn't have "jre" subdirectory
2185        len = strlen(buf);
2186        jrelib_p = buf + len;
2187        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2188        if (0 != access(buf, F_OK)) {
2189          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2190        }
2191
2192        if (0 == access(buf, F_OK)) {
2193          // Use current module name "libjvm.so"
2194          len = strlen(buf);
2195          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2196        } else {
2197          // Go back to path of .so
2198          realpath((char *)dlinfo.dli_fname, buf);
2199        }
2200      }
2201    }
2202  }
2203
2204  strcpy(saved_jvm_path, buf);
2205}
2206
2207
2208void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2209  // no prefix required, not even "_"
2210}
2211
2212
2213void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2214  // no suffix required
2215}
2216
2217// This method is a copy of JDK's sysGetLastErrorString
2218// from src/solaris/hpi/src/system_md.c
2219
2220size_t os::lasterror(char *buf, size_t len) {
2221
2222  if (errno == 0)  return 0;
2223
2224  const char *s = ::strerror(errno);
2225  size_t n = ::strlen(s);
2226  if (n >= len) {
2227    n = len - 1;
2228  }
2229  ::strncpy(buf, s, n);
2230  buf[n] = '\0';
2231  return n;
2232}
2233
2234
2235// sun.misc.Signal
2236
2237extern "C" {
2238  static void UserHandler(int sig, void *siginfo, void *context) {
2239    // Ctrl-C is pressed during error reporting, likely because the error
2240    // handler fails to abort. Let VM die immediately.
2241    if (sig == SIGINT && is_error_reported()) {
2242       os::die();
2243    }
2244
2245    os::signal_notify(sig);
2246    // We do not need to reinstate the signal handler each time...
2247  }
2248}
2249
2250void* os::user_handler() {
2251  return CAST_FROM_FN_PTR(void*, UserHandler);
2252}
2253
2254class Semaphore : public StackObj {
2255  public:
2256    Semaphore();
2257    ~Semaphore();
2258    void signal();
2259    void wait();
2260    bool trywait();
2261    bool timedwait(unsigned int sec, int nsec);
2262  private:
2263    sema_t _semaphore;
2264};
2265
2266
2267Semaphore::Semaphore() {
2268  sema_init(&_semaphore, 0, NULL, NULL);
2269}
2270
2271Semaphore::~Semaphore() {
2272  sema_destroy(&_semaphore);
2273}
2274
2275void Semaphore::signal() {
2276  sema_post(&_semaphore);
2277}
2278
2279void Semaphore::wait() {
2280  sema_wait(&_semaphore);
2281}
2282
2283bool Semaphore::trywait() {
2284  return sema_trywait(&_semaphore) == 0;
2285}
2286
2287bool Semaphore::timedwait(unsigned int sec, int nsec) {
2288  struct timespec ts;
2289  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2290
2291  while (1) {
2292    int result = sema_timedwait(&_semaphore, &ts);
2293    if (result == 0) {
2294      return true;
2295    } else if (errno == EINTR) {
2296      continue;
2297    } else if (errno == ETIME) {
2298      return false;
2299    } else {
2300      return false;
2301    }
2302  }
2303}
2304
2305extern "C" {
2306  typedef void (*sa_handler_t)(int);
2307  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2308}
2309
2310void* os::signal(int signal_number, void* handler) {
2311  struct sigaction sigAct, oldSigAct;
2312  sigfillset(&(sigAct.sa_mask));
2313  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2314  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2315
2316  if (sigaction(signal_number, &sigAct, &oldSigAct))
2317    // -1 means registration failed
2318    return (void *)-1;
2319
2320  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2321}
2322
2323void os::signal_raise(int signal_number) {
2324  raise(signal_number);
2325}
2326
2327/*
2328 * The following code is moved from os.cpp for making this
2329 * code platform specific, which it is by its very nature.
2330 */
2331
2332// a counter for each possible signal value
2333static int Sigexit = 0;
2334static int Maxlibjsigsigs;
2335static jint *pending_signals = NULL;
2336static int *preinstalled_sigs = NULL;
2337static struct sigaction *chainedsigactions = NULL;
2338static sema_t sig_sem;
2339typedef int (*version_getting_t)();
2340version_getting_t os::Solaris::get_libjsig_version = NULL;
2341static int libjsigversion = NULL;
2342
2343int os::sigexitnum_pd() {
2344  assert(Sigexit > 0, "signal memory not yet initialized");
2345  return Sigexit;
2346}
2347
2348void os::Solaris::init_signal_mem() {
2349  // Initialize signal structures
2350  Maxsignum = SIGRTMAX;
2351  Sigexit = Maxsignum+1;
2352  assert(Maxsignum >0, "Unable to obtain max signal number");
2353
2354  Maxlibjsigsigs = Maxsignum;
2355
2356  // pending_signals has one int per signal
2357  // The additional signal is for SIGEXIT - exit signal to signal_thread
2358  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2359  memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2360
2361  if (UseSignalChaining) {
2362     chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2363       * (Maxsignum + 1), mtInternal);
2364     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2365     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2366     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2367  }
2368  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
2369  memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2370}
2371
2372void os::signal_init_pd() {
2373  int ret;
2374
2375  ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2376  assert(ret == 0, "sema_init() failed");
2377}
2378
2379void os::signal_notify(int signal_number) {
2380  int ret;
2381
2382  Atomic::inc(&pending_signals[signal_number]);
2383  ret = ::sema_post(&sig_sem);
2384  assert(ret == 0, "sema_post() failed");
2385}
2386
2387static int check_pending_signals(bool wait_for_signal) {
2388  int ret;
2389  while (true) {
2390    for (int i = 0; i < Sigexit + 1; i++) {
2391      jint n = pending_signals[i];
2392      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2393        return i;
2394      }
2395    }
2396    if (!wait_for_signal) {
2397      return -1;
2398    }
2399    JavaThread *thread = JavaThread::current();
2400    ThreadBlockInVM tbivm(thread);
2401
2402    bool threadIsSuspended;
2403    do {
2404      thread->set_suspend_equivalent();
2405      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2406      while((ret = ::sema_wait(&sig_sem)) == EINTR)
2407          ;
2408      assert(ret == 0, "sema_wait() failed");
2409
2410      // were we externally suspended while we were waiting?
2411      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2412      if (threadIsSuspended) {
2413        //
2414        // The semaphore has been incremented, but while we were waiting
2415        // another thread suspended us. We don't want to continue running
2416        // while suspended because that would surprise the thread that
2417        // suspended us.
2418        //
2419        ret = ::sema_post(&sig_sem);
2420        assert(ret == 0, "sema_post() failed");
2421
2422        thread->java_suspend_self();
2423      }
2424    } while (threadIsSuspended);
2425  }
2426}
2427
2428int os::signal_lookup() {
2429  return check_pending_signals(false);
2430}
2431
2432int os::signal_wait() {
2433  return check_pending_signals(true);
2434}
2435
2436////////////////////////////////////////////////////////////////////////////////
2437// Virtual Memory
2438
2439static int page_size = -1;
2440
2441// The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2442// clear this var if support is not available.
2443static bool has_map_align = true;
2444
2445int os::vm_page_size() {
2446  assert(page_size != -1, "must call os::init");
2447  return page_size;
2448}
2449
2450// Solaris allocates memory by pages.
2451int os::vm_allocation_granularity() {
2452  assert(page_size != -1, "must call os::init");
2453  return page_size;
2454}
2455
2456static bool recoverable_mmap_error(int err) {
2457  // See if the error is one we can let the caller handle. This
2458  // list of errno values comes from the Solaris mmap(2) man page.
2459  switch (err) {
2460  case EBADF:
2461  case EINVAL:
2462  case ENOTSUP:
2463    // let the caller deal with these errors
2464    return true;
2465
2466  default:
2467    // Any remaining errors on this OS can cause our reserved mapping
2468    // to be lost. That can cause confusion where different data
2469    // structures think they have the same memory mapped. The worst
2470    // scenario is if both the VM and a library think they have the
2471    // same memory mapped.
2472    return false;
2473  }
2474}
2475
2476static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2477                                    int err) {
2478  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2479          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2480          strerror(err), err);
2481}
2482
2483static void warn_fail_commit_memory(char* addr, size_t bytes,
2484                                    size_t alignment_hint, bool exec,
2485                                    int err) {
2486  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2487          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2488          alignment_hint, exec, strerror(err), err);
2489}
2490
2491int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2492  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2493  size_t size = bytes;
2494  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2495  if (res != NULL) {
2496    if (UseNUMAInterleaving) {
2497      numa_make_global(addr, bytes);
2498    }
2499    return 0;
2500  }
2501
2502  int err = errno;  // save errno from mmap() call in mmap_chunk()
2503
2504  if (!recoverable_mmap_error(err)) {
2505    warn_fail_commit_memory(addr, bytes, exec, err);
2506    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2507  }
2508
2509  return err;
2510}
2511
2512bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2513  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2514}
2515
2516void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2517                                  const char* mesg) {
2518  assert(mesg != NULL, "mesg must be specified");
2519  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2520  if (err != 0) {
2521    // the caller wants all commit errors to exit with the specified mesg:
2522    warn_fail_commit_memory(addr, bytes, exec, err);
2523    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2524  }
2525}
2526
2527int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2528                                    size_t alignment_hint, bool exec) {
2529  int err = Solaris::commit_memory_impl(addr, bytes, exec);
2530  if (err == 0) {
2531    if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
2532      // If the large page size has been set and the VM
2533      // is using large pages, use the large page size
2534      // if it is smaller than the alignment hint. This is
2535      // a case where the VM wants to use a larger alignment size
2536      // for its own reasons but still want to use large pages
2537      // (which is what matters to setting the mpss range.
2538      size_t page_size = 0;
2539      if (large_page_size() < alignment_hint) {
2540        assert(UseLargePages, "Expected to be here for large page use only");
2541        page_size = large_page_size();
2542      } else {
2543        // If the alignment hint is less than the large page
2544        // size, the VM wants a particular alignment (thus the hint)
2545        // for internal reasons.  Try to set the mpss range using
2546        // the alignment_hint.
2547        page_size = alignment_hint;
2548      }
2549      // Since this is a hint, ignore any failures.
2550      (void)Solaris::setup_large_pages(addr, bytes, page_size);
2551    }
2552  }
2553  return err;
2554}
2555
2556bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2557                          bool exec) {
2558  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2559}
2560
2561void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2562                                  size_t alignment_hint, bool exec,
2563                                  const char* mesg) {
2564  assert(mesg != NULL, "mesg must be specified");
2565  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2566  if (err != 0) {
2567    // the caller wants all commit errors to exit with the specified mesg:
2568    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2569    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2570  }
2571}
2572
2573// Uncommit the pages in a specified region.
2574void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2575  if (madvise(addr, bytes, MADV_FREE) < 0) {
2576    debug_only(warning("MADV_FREE failed."));
2577    return;
2578  }
2579}
2580
2581bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2582  return os::commit_memory(addr, size, !ExecMem);
2583}
2584
2585bool os::remove_stack_guard_pages(char* addr, size_t size) {
2586  return os::uncommit_memory(addr, size);
2587}
2588
2589// Change the page size in a given range.
2590void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2591  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2592  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2593  if (UseLargePages) {
2594    Solaris::setup_large_pages(addr, bytes, alignment_hint);
2595  }
2596}
2597
2598// Tell the OS to make the range local to the first-touching LWP
2599void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2600  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2601  if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2602    debug_only(warning("MADV_ACCESS_LWP failed."));
2603  }
2604}
2605
2606// Tell the OS that this range would be accessed from different LWPs.
2607void os::numa_make_global(char *addr, size_t bytes) {
2608  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2609  if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2610    debug_only(warning("MADV_ACCESS_MANY failed."));
2611  }
2612}
2613
2614// Get the number of the locality groups.
2615size_t os::numa_get_groups_num() {
2616  size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2617  return n != -1 ? n : 1;
2618}
2619
2620// Get a list of leaf locality groups. A leaf lgroup is group that
2621// doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2622// board. An LWP is assigned to one of these groups upon creation.
2623size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2624   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2625     ids[0] = 0;
2626     return 1;
2627   }
2628   int result_size = 0, top = 1, bottom = 0, cur = 0;
2629   for (int k = 0; k < size; k++) {
2630     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2631                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2632     if (r == -1) {
2633       ids[0] = 0;
2634       return 1;
2635     }
2636     if (!r) {
2637       // That's a leaf node.
2638       assert (bottom <= cur, "Sanity check");
2639       // Check if the node has memory
2640       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2641                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2642         ids[bottom++] = ids[cur];
2643       }
2644     }
2645     top += r;
2646     cur++;
2647   }
2648   if (bottom == 0) {
2649     // Handle a situation, when the OS reports no memory available.
2650     // Assume UMA architecture.
2651     ids[0] = 0;
2652     return 1;
2653   }
2654   return bottom;
2655}
2656
2657// Detect the topology change. Typically happens during CPU plugging-unplugging.
2658bool os::numa_topology_changed() {
2659  int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2660  if (is_stale != -1 && is_stale) {
2661    Solaris::lgrp_fini(Solaris::lgrp_cookie());
2662    Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2663    assert(c != 0, "Failure to initialize LGRP API");
2664    Solaris::set_lgrp_cookie(c);
2665    return true;
2666  }
2667  return false;
2668}
2669
2670// Get the group id of the current LWP.
2671int os::numa_get_group_id() {
2672  int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2673  if (lgrp_id == -1) {
2674    return 0;
2675  }
2676  const int size = os::numa_get_groups_num();
2677  int *ids = (int*)alloca(size * sizeof(int));
2678
2679  // Get the ids of all lgroups with memory; r is the count.
2680  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2681                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2682  if (r <= 0) {
2683    return 0;
2684  }
2685  return ids[os::random() % r];
2686}
2687
2688// Request information about the page.
2689bool os::get_page_info(char *start, page_info* info) {
2690  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2691  uint64_t addr = (uintptr_t)start;
2692  uint64_t outdata[2];
2693  uint_t validity = 0;
2694
2695  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2696    return false;
2697  }
2698
2699  info->size = 0;
2700  info->lgrp_id = -1;
2701
2702  if ((validity & 1) != 0) {
2703    if ((validity & 2) != 0) {
2704      info->lgrp_id = outdata[0];
2705    }
2706    if ((validity & 4) != 0) {
2707      info->size = outdata[1];
2708    }
2709    return true;
2710  }
2711  return false;
2712}
2713
2714// Scan the pages from start to end until a page different than
2715// the one described in the info parameter is encountered.
2716char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2717  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2718  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2719  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2720  uint_t validity[MAX_MEMINFO_CNT];
2721
2722  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2723  uint64_t p = (uint64_t)start;
2724  while (p < (uint64_t)end) {
2725    addrs[0] = p;
2726    size_t addrs_count = 1;
2727    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2728      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2729      addrs_count++;
2730    }
2731
2732    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2733      return NULL;
2734    }
2735
2736    size_t i = 0;
2737    for (; i < addrs_count; i++) {
2738      if ((validity[i] & 1) != 0) {
2739        if ((validity[i] & 4) != 0) {
2740          if (outdata[types * i + 1] != page_expected->size) {
2741            break;
2742          }
2743        } else
2744          if (page_expected->size != 0) {
2745            break;
2746          }
2747
2748        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2749          if (outdata[types * i] != page_expected->lgrp_id) {
2750            break;
2751          }
2752        }
2753      } else {
2754        return NULL;
2755      }
2756    }
2757
2758    if (i < addrs_count) {
2759      if ((validity[i] & 2) != 0) {
2760        page_found->lgrp_id = outdata[types * i];
2761      } else {
2762        page_found->lgrp_id = -1;
2763      }
2764      if ((validity[i] & 4) != 0) {
2765        page_found->size = outdata[types * i + 1];
2766      } else {
2767        page_found->size = 0;
2768      }
2769      return (char*)addrs[i];
2770    }
2771
2772    p = addrs[addrs_count - 1] + page_size;
2773  }
2774  return end;
2775}
2776
2777bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2778  size_t size = bytes;
2779  // Map uncommitted pages PROT_NONE so we fail early if we touch an
2780  // uncommitted page. Otherwise, the read/write might succeed if we
2781  // have enough swap space to back the physical page.
2782  return
2783    NULL != Solaris::mmap_chunk(addr, size,
2784                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2785                                PROT_NONE);
2786}
2787
2788char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2789  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2790
2791  if (b == MAP_FAILED) {
2792    return NULL;
2793  }
2794  return b;
2795}
2796
2797char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2798  char* addr = requested_addr;
2799  int flags = MAP_PRIVATE | MAP_NORESERVE;
2800
2801  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2802
2803  if (fixed) {
2804    flags |= MAP_FIXED;
2805  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2806    flags |= MAP_ALIGN;
2807    addr = (char*) alignment_hint;
2808  }
2809
2810  // Map uncommitted pages PROT_NONE so we fail early if we touch an
2811  // uncommitted page. Otherwise, the read/write might succeed if we
2812  // have enough swap space to back the physical page.
2813  return mmap_chunk(addr, bytes, flags, PROT_NONE);
2814}
2815
2816char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2817  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2818
2819  guarantee(requested_addr == NULL || requested_addr == addr,
2820            "OS failed to return requested mmap address.");
2821  return addr;
2822}
2823
2824// Reserve memory at an arbitrary address, only if that area is
2825// available (and not reserved for something else).
2826
2827char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2828  const int max_tries = 10;
2829  char* base[max_tries];
2830  size_t size[max_tries];
2831
2832  // Solaris adds a gap between mmap'ed regions.  The size of the gap
2833  // is dependent on the requested size and the MMU.  Our initial gap
2834  // value here is just a guess and will be corrected later.
2835  bool had_top_overlap = false;
2836  bool have_adjusted_gap = false;
2837  size_t gap = 0x400000;
2838
2839  // Assert only that the size is a multiple of the page size, since
2840  // that's all that mmap requires, and since that's all we really know
2841  // about at this low abstraction level.  If we need higher alignment,
2842  // we can either pass an alignment to this method or verify alignment
2843  // in one of the methods further up the call chain.  See bug 5044738.
2844  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2845
2846  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2847  // Give it a try, if the kernel honors the hint we can return immediately.
2848  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2849
2850  volatile int err = errno;
2851  if (addr == requested_addr) {
2852    return addr;
2853  } else if (addr != NULL) {
2854    pd_unmap_memory(addr, bytes);
2855  }
2856
2857  if (PrintMiscellaneous && Verbose) {
2858    char buf[256];
2859    buf[0] = '\0';
2860    if (addr == NULL) {
2861      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2862    }
2863    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2864            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2865            "%s", bytes, requested_addr, addr, buf);
2866  }
2867
2868  // Address hint method didn't work.  Fall back to the old method.
2869  // In theory, once SNV becomes our oldest supported platform, this
2870  // code will no longer be needed.
2871  //
2872  // Repeatedly allocate blocks until the block is allocated at the
2873  // right spot. Give up after max_tries.
2874  int i;
2875  for (i = 0; i < max_tries; ++i) {
2876    base[i] = reserve_memory(bytes);
2877
2878    if (base[i] != NULL) {
2879      // Is this the block we wanted?
2880      if (base[i] == requested_addr) {
2881        size[i] = bytes;
2882        break;
2883      }
2884
2885      // check that the gap value is right
2886      if (had_top_overlap && !have_adjusted_gap) {
2887        size_t actual_gap = base[i-1] - base[i] - bytes;
2888        if (gap != actual_gap) {
2889          // adjust the gap value and retry the last 2 allocations
2890          assert(i > 0, "gap adjustment code problem");
2891          have_adjusted_gap = true;  // adjust the gap only once, just in case
2892          gap = actual_gap;
2893          if (PrintMiscellaneous && Verbose) {
2894            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2895          }
2896          unmap_memory(base[i], bytes);
2897          unmap_memory(base[i-1], size[i-1]);
2898          i-=2;
2899          continue;
2900        }
2901      }
2902
2903      // Does this overlap the block we wanted? Give back the overlapped
2904      // parts and try again.
2905      //
2906      // There is still a bug in this code: if top_overlap == bytes,
2907      // the overlap is offset from requested region by the value of gap.
2908      // In this case giving back the overlapped part will not work,
2909      // because we'll give back the entire block at base[i] and
2910      // therefore the subsequent allocation will not generate a new gap.
2911      // This could be fixed with a new algorithm that used larger
2912      // or variable size chunks to find the requested region -
2913      // but such a change would introduce additional complications.
2914      // It's rare enough that the planets align for this bug,
2915      // so we'll just wait for a fix for 6204603/5003415 which
2916      // will provide a mmap flag to allow us to avoid this business.
2917
2918      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2919      if (top_overlap >= 0 && top_overlap < bytes) {
2920        had_top_overlap = true;
2921        unmap_memory(base[i], top_overlap);
2922        base[i] += top_overlap;
2923        size[i] = bytes - top_overlap;
2924      } else {
2925        size_t bottom_overlap = base[i] + bytes - requested_addr;
2926        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2927          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2928            warning("attempt_reserve_memory_at: possible alignment bug");
2929          }
2930          unmap_memory(requested_addr, bottom_overlap);
2931          size[i] = bytes - bottom_overlap;
2932        } else {
2933          size[i] = bytes;
2934        }
2935      }
2936    }
2937  }
2938
2939  // Give back the unused reserved pieces.
2940
2941  for (int j = 0; j < i; ++j) {
2942    if (base[j] != NULL) {
2943      unmap_memory(base[j], size[j]);
2944    }
2945  }
2946
2947  return (i < max_tries) ? requested_addr : NULL;
2948}
2949
2950bool os::pd_release_memory(char* addr, size_t bytes) {
2951  size_t size = bytes;
2952  return munmap(addr, size) == 0;
2953}
2954
2955static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2956  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2957         "addr must be page aligned");
2958  int retVal = mprotect(addr, bytes, prot);
2959  return retVal == 0;
2960}
2961
2962// Protect memory (Used to pass readonly pages through
2963// JNI GetArray<type>Elements with empty arrays.)
2964// Also, used for serialization page and for compressed oops null pointer
2965// checking.
2966bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2967                        bool is_committed) {
2968  unsigned int p = 0;
2969  switch (prot) {
2970  case MEM_PROT_NONE: p = PROT_NONE; break;
2971  case MEM_PROT_READ: p = PROT_READ; break;
2972  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2973  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2974  default:
2975    ShouldNotReachHere();
2976  }
2977  // is_committed is unused.
2978  return solaris_mprotect(addr, bytes, p);
2979}
2980
2981// guard_memory and unguard_memory only happens within stack guard pages.
2982// Since ISM pertains only to the heap, guard and unguard memory should not
2983/// happen with an ISM region.
2984bool os::guard_memory(char* addr, size_t bytes) {
2985  return solaris_mprotect(addr, bytes, PROT_NONE);
2986}
2987
2988bool os::unguard_memory(char* addr, size_t bytes) {
2989  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2990}
2991
2992// Large page support
2993static size_t _large_page_size = 0;
2994
2995// Insertion sort for small arrays (descending order).
2996static void insertion_sort_descending(size_t* array, int len) {
2997  for (int i = 0; i < len; i++) {
2998    size_t val = array[i];
2999    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3000      size_t tmp = array[key];
3001      array[key] = array[key - 1];
3002      array[key - 1] = tmp;
3003    }
3004  }
3005}
3006
3007bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3008  const unsigned int usable_count = VM_Version::page_size_count();
3009  if (usable_count == 1) {
3010    return false;
3011  }
3012
3013  // Find the right getpagesizes interface.  When solaris 11 is the minimum
3014  // build platform, getpagesizes() (without the '2') can be called directly.
3015  typedef int (*gps_t)(size_t[], int);
3016  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3017  if (gps_func == NULL) {
3018    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3019    if (gps_func == NULL) {
3020      if (warn) {
3021        warning("MPSS is not supported by the operating system.");
3022      }
3023      return false;
3024    }
3025  }
3026
3027  // Fill the array of page sizes.
3028  int n = (*gps_func)(_page_sizes, page_sizes_max);
3029  assert(n > 0, "Solaris bug?");
3030
3031  if (n == page_sizes_max) {
3032    // Add a sentinel value (necessary only if the array was completely filled
3033    // since it is static (zeroed at initialization)).
3034    _page_sizes[--n] = 0;
3035    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3036  }
3037  assert(_page_sizes[n] == 0, "missing sentinel");
3038  trace_page_sizes("available page sizes", _page_sizes, n);
3039
3040  if (n == 1) return false;     // Only one page size available.
3041
3042  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3043  // select up to usable_count elements.  First sort the array, find the first
3044  // acceptable value, then copy the usable sizes to the top of the array and
3045  // trim the rest.  Make sure to include the default page size :-).
3046  //
3047  // A better policy could get rid of the 4M limit by taking the sizes of the
3048  // important VM memory regions (java heap and possibly the code cache) into
3049  // account.
3050  insertion_sort_descending(_page_sizes, n);
3051  const size_t size_limit =
3052    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3053  int beg;
3054  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3055  const int end = MIN2((int)usable_count, n) - 1;
3056  for (int cur = 0; cur < end; ++cur, ++beg) {
3057    _page_sizes[cur] = _page_sizes[beg];
3058  }
3059  _page_sizes[end] = vm_page_size();
3060  _page_sizes[end + 1] = 0;
3061
3062  if (_page_sizes[end] > _page_sizes[end - 1]) {
3063    // Default page size is not the smallest; sort again.
3064    insertion_sort_descending(_page_sizes, end + 1);
3065  }
3066  *page_size = _page_sizes[0];
3067
3068  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3069  return true;
3070}
3071
3072void os::large_page_init() {
3073  if (UseLargePages) {
3074    // print a warning if any large page related flag is specified on command line
3075    bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3076                           !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3077
3078    UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3079  }
3080}
3081
3082bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3083  // Signal to OS that we want large pages for addresses
3084  // from addr, addr + bytes
3085  struct memcntl_mha mpss_struct;
3086  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3087  mpss_struct.mha_pagesize = align;
3088  mpss_struct.mha_flags = 0;
3089  // Upon successful completion, memcntl() returns 0
3090  if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3091    debug_only(warning("Attempt to use MPSS failed."));
3092    return false;
3093  }
3094  return true;
3095}
3096
3097char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3098  fatal("os::reserve_memory_special should not be called on Solaris.");
3099  return NULL;
3100}
3101
3102bool os::release_memory_special(char* base, size_t bytes) {
3103  fatal("os::release_memory_special should not be called on Solaris.");
3104  return false;
3105}
3106
3107size_t os::large_page_size() {
3108  return _large_page_size;
3109}
3110
3111// MPSS allows application to commit large page memory on demand; with ISM
3112// the entire memory region must be allocated as shared memory.
3113bool os::can_commit_large_page_memory() {
3114  return true;
3115}
3116
3117bool os::can_execute_large_page_memory() {
3118  return true;
3119}
3120
3121// Read calls from inside the vm need to perform state transitions
3122size_t os::read(int fd, void *buf, unsigned int nBytes) {
3123  size_t res;
3124  JavaThread* thread = (JavaThread*)Thread::current();
3125  assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3126  ThreadBlockInVM tbiv(thread);
3127  RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3128  return res;
3129}
3130
3131size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3132  size_t res;
3133  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3134          "Assumed _thread_in_native");
3135  RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3136  return res;
3137}
3138
3139void os::naked_short_sleep(jlong ms) {
3140  assert(ms < 1000, "Un-interruptable sleep, short time use only");
3141
3142  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3143  // Solaris requires -lrt for this.
3144  usleep((ms * 1000));
3145
3146  return;
3147}
3148
3149// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3150void os::infinite_sleep() {
3151  while (true) {    // sleep forever ...
3152    ::sleep(100);   // ... 100 seconds at a time
3153  }
3154}
3155
3156// Used to convert frequent JVM_Yield() to nops
3157bool os::dont_yield() {
3158  if (DontYieldALot) {
3159    static hrtime_t last_time = 0;
3160    hrtime_t diff = getTimeNanos() - last_time;
3161
3162    if (diff < DontYieldALotInterval * 1000000)
3163      return true;
3164
3165    last_time += diff;
3166
3167    return false;
3168  }
3169  else {
3170    return false;
3171  }
3172}
3173
3174// Caveat: Solaris os::yield() causes a thread-state transition whereas
3175// the linux and win32 implementations do not.  This should be checked.
3176
3177void os::yield() {
3178  // Yields to all threads with same or greater priority
3179  os::sleep(Thread::current(), 0, false);
3180}
3181
3182// Note that yield semantics are defined by the scheduling class to which
3183// the thread currently belongs.  Typically, yield will _not yield to
3184// other equal or higher priority threads that reside on the dispatch queues
3185// of other CPUs.
3186
3187os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3188
3189void os::yield_all() {
3190  // Yields to all threads, including threads with lower priorities
3191  os::sleep(Thread::current(), 1, false);
3192}
3193
3194// Interface for setting lwp priorities.  If we are using T2 libthread,
3195// which forces the use of BoundThreads or we manually set UseBoundThreads,
3196// all of our threads will be assigned to real lwp's.  Using the thr_setprio
3197// function is meaningless in this mode so we must adjust the real lwp's priority
3198// The routines below implement the getting and setting of lwp priorities.
3199//
3200// Note: T2 is now the only supported libthread. UseBoundThreads flag is
3201//       being deprecated and all threads are now BoundThreads
3202//
3203// Note: There are three priority scales used on Solaris.  Java priotities
3204//       which range from 1 to 10, libthread "thr_setprio" scale which range
3205//       from 0 to 127, and the current scheduling class of the process we
3206//       are running in.  This is typically from -60 to +60.
3207//       The setting of the lwp priorities in done after a call to thr_setprio
3208//       so Java priorities are mapped to libthread priorities and we map from
3209//       the latter to lwp priorities.  We don't keep priorities stored in
3210//       Java priorities since some of our worker threads want to set priorities
3211//       higher than all Java threads.
3212//
3213// For related information:
3214// (1)  man -s 2 priocntl
3215// (2)  man -s 4 priocntl
3216// (3)  man dispadmin
3217// =    librt.so
3218// =    libthread/common/rtsched.c - thrp_setlwpprio().
3219// =    ps -cL <pid> ... to validate priority.
3220// =    sched_get_priority_min and _max
3221//              pthread_create
3222//              sched_setparam
3223//              pthread_setschedparam
3224//
3225// Assumptions:
3226// +    We assume that all threads in the process belong to the same
3227//              scheduling class.   IE. an homogenous process.
3228// +    Must be root or in IA group to change change "interactive" attribute.
3229//              Priocntl() will fail silently.  The only indication of failure is when
3230//              we read-back the value and notice that it hasn't changed.
3231// +    Interactive threads enter the runq at the head, non-interactive at the tail.
3232// +    For RT, change timeslice as well.  Invariant:
3233//              constant "priority integral"
3234//              Konst == TimeSlice * (60-Priority)
3235//              Given a priority, compute appropriate timeslice.
3236// +    Higher numerical values have higher priority.
3237
3238// sched class attributes
3239typedef struct {
3240        int   schedPolicy;              // classID
3241        int   maxPrio;
3242        int   minPrio;
3243} SchedInfo;
3244
3245
3246static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3247
3248#ifdef ASSERT
3249static int  ReadBackValidate = 1;
3250#endif
3251static int  myClass     = 0;
3252static int  myMin       = 0;
3253static int  myMax       = 0;
3254static int  myCur       = 0;
3255static bool priocntl_enable = false;
3256
3257static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3258static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3259
3260
3261// lwp_priocntl_init
3262//
3263// Try to determine the priority scale for our process.
3264//
3265// Return errno or 0 if OK.
3266//
3267static int lwp_priocntl_init () {
3268  int rslt;
3269  pcinfo_t ClassInfo;
3270  pcparms_t ParmInfo;
3271  int i;
3272
3273  if (!UseThreadPriorities) return 0;
3274
3275  // If ThreadPriorityPolicy is 1, switch tables
3276  if (ThreadPriorityPolicy == 1) {
3277    for (i = 0 ; i < CriticalPriority+1; i++)
3278      os::java_to_os_priority[i] = prio_policy1[i];
3279  }
3280  if (UseCriticalJavaThreadPriority) {
3281    // MaxPriority always maps to the FX scheduling class and criticalPrio.
3282    // See set_native_priority() and set_lwp_class_and_priority().
3283    // Save original MaxPriority mapping in case attempt to
3284    // use critical priority fails.
3285    java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3286    // Set negative to distinguish from other priorities
3287    os::java_to_os_priority[MaxPriority] = -criticalPrio;
3288  }
3289
3290  // Get IDs for a set of well-known scheduling classes.
3291  // TODO-FIXME: GETCLINFO returns the current # of classes in the
3292  // the system.  We should have a loop that iterates over the
3293  // classID values, which are known to be "small" integers.
3294
3295  strcpy(ClassInfo.pc_clname, "TS");
3296  ClassInfo.pc_cid = -1;
3297  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3298  if (rslt < 0) return errno;
3299  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3300  tsLimits.schedPolicy = ClassInfo.pc_cid;
3301  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3302  tsLimits.minPrio = -tsLimits.maxPrio;
3303
3304  strcpy(ClassInfo.pc_clname, "IA");
3305  ClassInfo.pc_cid = -1;
3306  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3307  if (rslt < 0) return errno;
3308  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3309  iaLimits.schedPolicy = ClassInfo.pc_cid;
3310  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3311  iaLimits.minPrio = -iaLimits.maxPrio;
3312
3313  strcpy(ClassInfo.pc_clname, "RT");
3314  ClassInfo.pc_cid = -1;
3315  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3316  if (rslt < 0) return errno;
3317  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3318  rtLimits.schedPolicy = ClassInfo.pc_cid;
3319  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3320  rtLimits.minPrio = 0;
3321
3322  strcpy(ClassInfo.pc_clname, "FX");
3323  ClassInfo.pc_cid = -1;
3324  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3325  if (rslt < 0) return errno;
3326  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3327  fxLimits.schedPolicy = ClassInfo.pc_cid;
3328  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3329  fxLimits.minPrio = 0;
3330
3331  // Query our "current" scheduling class.
3332  // This will normally be IA, TS or, rarely, FX or RT.
3333  memset(&ParmInfo, 0, sizeof(ParmInfo));
3334  ParmInfo.pc_cid = PC_CLNULL;
3335  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3336  if (rslt < 0) return errno;
3337  myClass = ParmInfo.pc_cid;
3338
3339  // We now know our scheduling classId, get specific information
3340  // about the class.
3341  ClassInfo.pc_cid = myClass;
3342  ClassInfo.pc_clname[0] = 0;
3343  rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3344  if (rslt < 0) return errno;
3345
3346  if (ThreadPriorityVerbose) {
3347    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3348  }
3349
3350  memset(&ParmInfo, 0, sizeof(pcparms_t));
3351  ParmInfo.pc_cid = PC_CLNULL;
3352  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3353  if (rslt < 0) return errno;
3354
3355  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3356    myMin = rtLimits.minPrio;
3357    myMax = rtLimits.maxPrio;
3358  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3359    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3360    myMin = iaLimits.minPrio;
3361    myMax = iaLimits.maxPrio;
3362    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3363  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3364    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3365    myMin = tsLimits.minPrio;
3366    myMax = tsLimits.maxPrio;
3367    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3368  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3369    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3370    myMin = fxLimits.minPrio;
3371    myMax = fxLimits.maxPrio;
3372    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3373  } else {
3374    // No clue - punt
3375    if (ThreadPriorityVerbose)
3376      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3377    return EINVAL;      // no clue, punt
3378  }
3379
3380  if (ThreadPriorityVerbose) {
3381    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3382  }
3383
3384  priocntl_enable = true;  // Enable changing priorities
3385  return 0;
3386}
3387
3388#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3389#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3390#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3391#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3392
3393
3394// scale_to_lwp_priority
3395//
3396// Convert from the libthread "thr_setprio" scale to our current
3397// lwp scheduling class scale.
3398//
3399static
3400int     scale_to_lwp_priority (int rMin, int rMax, int x)
3401{
3402  int v;
3403
3404  if (x == 127) return rMax;            // avoid round-down
3405    v = (((x*(rMax-rMin)))/128)+rMin;
3406  return v;
3407}
3408
3409
3410// set_lwp_class_and_priority
3411int set_lwp_class_and_priority(int ThreadID, int lwpid,
3412                               int newPrio, int new_class, bool scale) {
3413  int rslt;
3414  int Actual, Expected, prv;
3415  pcparms_t ParmInfo;                   // for GET-SET
3416#ifdef ASSERT
3417  pcparms_t ReadBack;                   // for readback
3418#endif
3419
3420  // Set priority via PC_GETPARMS, update, PC_SETPARMS
3421  // Query current values.
3422  // TODO: accelerate this by eliminating the PC_GETPARMS call.
3423  // Cache "pcparms_t" in global ParmCache.
3424  // TODO: elide set-to-same-value
3425
3426  // If something went wrong on init, don't change priorities.
3427  if ( !priocntl_enable ) {
3428    if (ThreadPriorityVerbose)
3429      tty->print_cr("Trying to set priority but init failed, ignoring");
3430    return EINVAL;
3431  }
3432
3433  // If lwp hasn't started yet, just return
3434  // the _start routine will call us again.
3435  if ( lwpid <= 0 ) {
3436    if (ThreadPriorityVerbose) {
3437      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3438                     INTPTR_FORMAT " to %d, lwpid not set",
3439                     ThreadID, newPrio);
3440    }
3441    return 0;
3442  }
3443
3444  if (ThreadPriorityVerbose) {
3445    tty->print_cr ("set_lwp_class_and_priority("
3446                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3447                   ThreadID, lwpid, newPrio);
3448  }
3449
3450  memset(&ParmInfo, 0, sizeof(pcparms_t));
3451  ParmInfo.pc_cid = PC_CLNULL;
3452  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3453  if (rslt < 0) return errno;
3454
3455  int cur_class = ParmInfo.pc_cid;
3456  ParmInfo.pc_cid = (id_t)new_class;
3457
3458  if (new_class == rtLimits.schedPolicy) {
3459    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3460    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3461                                                       rtLimits.maxPrio, newPrio)
3462                               : newPrio;
3463    rtInfo->rt_tqsecs  = RT_NOCHANGE;
3464    rtInfo->rt_tqnsecs = RT_NOCHANGE;
3465    if (ThreadPriorityVerbose) {
3466      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3467    }
3468  } else if (new_class == iaLimits.schedPolicy) {
3469    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3470    int maxClamped     = MIN2(iaLimits.maxPrio,
3471                              cur_class == new_class
3472                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3473    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3474                                                       maxClamped, newPrio)
3475                               : newPrio;
3476    iaInfo->ia_uprilim = cur_class == new_class
3477                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3478    iaInfo->ia_mode    = IA_NOCHANGE;
3479    if (ThreadPriorityVerbose) {
3480      tty->print_cr("IA: [%d...%d] %d->%d\n",
3481                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3482    }
3483  } else if (new_class == tsLimits.schedPolicy) {
3484    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3485    int maxClamped     = MIN2(tsLimits.maxPrio,
3486                              cur_class == new_class
3487                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3488    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3489                                                       maxClamped, newPrio)
3490                               : newPrio;
3491    tsInfo->ts_uprilim = cur_class == new_class
3492                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3493    if (ThreadPriorityVerbose) {
3494      tty->print_cr("TS: [%d...%d] %d->%d\n",
3495                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3496    }
3497  } else if (new_class == fxLimits.schedPolicy) {
3498    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3499    int maxClamped     = MIN2(fxLimits.maxPrio,
3500                              cur_class == new_class
3501                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3502    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3503                                                       maxClamped, newPrio)
3504                               : newPrio;
3505    fxInfo->fx_uprilim = cur_class == new_class
3506                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3507    fxInfo->fx_tqsecs  = FX_NOCHANGE;
3508    fxInfo->fx_tqnsecs = FX_NOCHANGE;
3509    if (ThreadPriorityVerbose) {
3510      tty->print_cr("FX: [%d...%d] %d->%d\n",
3511                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3512    }
3513  } else {
3514    if (ThreadPriorityVerbose) {
3515      tty->print_cr("Unknown new scheduling class %d\n", new_class);
3516    }
3517    return EINVAL;    // no clue, punt
3518  }
3519
3520  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3521  if (ThreadPriorityVerbose && rslt) {
3522    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3523  }
3524  if (rslt < 0) return errno;
3525
3526#ifdef ASSERT
3527  // Sanity check: read back what we just attempted to set.
3528  // In theory it could have changed in the interim ...
3529  //
3530  // The priocntl system call is tricky.
3531  // Sometimes it'll validate the priority value argument and
3532  // return EINVAL if unhappy.  At other times it fails silently.
3533  // Readbacks are prudent.
3534
3535  if (!ReadBackValidate) return 0;
3536
3537  memset(&ReadBack, 0, sizeof(pcparms_t));
3538  ReadBack.pc_cid = PC_CLNULL;
3539  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3540  assert(rslt >= 0, "priocntl failed");
3541  Actual = Expected = 0xBAD;
3542  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3543  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3544    Actual   = RTPRI(ReadBack)->rt_pri;
3545    Expected = RTPRI(ParmInfo)->rt_pri;
3546  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3547    Actual   = IAPRI(ReadBack)->ia_upri;
3548    Expected = IAPRI(ParmInfo)->ia_upri;
3549  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3550    Actual   = TSPRI(ReadBack)->ts_upri;
3551    Expected = TSPRI(ParmInfo)->ts_upri;
3552  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3553    Actual   = FXPRI(ReadBack)->fx_upri;
3554    Expected = FXPRI(ParmInfo)->fx_upri;
3555  } else {
3556    if (ThreadPriorityVerbose) {
3557      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3558                    ParmInfo.pc_cid);
3559    }
3560  }
3561
3562  if (Actual != Expected) {
3563    if (ThreadPriorityVerbose) {
3564      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3565                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3566    }
3567  }
3568#endif
3569
3570  return 0;
3571}
3572
3573// Solaris only gives access to 128 real priorities at a time,
3574// so we expand Java's ten to fill this range.  This would be better
3575// if we dynamically adjusted relative priorities.
3576//
3577// The ThreadPriorityPolicy option allows us to select 2 different
3578// priority scales.
3579//
3580// ThreadPriorityPolicy=0
3581// Since the Solaris' default priority is MaximumPriority, we do not
3582// set a priority lower than Max unless a priority lower than
3583// NormPriority is requested.
3584//
3585// ThreadPriorityPolicy=1
3586// This mode causes the priority table to get filled with
3587// linear values.  NormPriority get's mapped to 50% of the
3588// Maximum priority an so on.  This will cause VM threads
3589// to get unfair treatment against other Solaris processes
3590// which do not explicitly alter their thread priorities.
3591//
3592
3593int os::java_to_os_priority[CriticalPriority + 1] = {
3594  -99999,         // 0 Entry should never be used
3595
3596  0,              // 1 MinPriority
3597  32,             // 2
3598  64,             // 3
3599
3600  96,             // 4
3601  127,            // 5 NormPriority
3602  127,            // 6
3603
3604  127,            // 7
3605  127,            // 8
3606  127,            // 9 NearMaxPriority
3607
3608  127,            // 10 MaxPriority
3609
3610  -criticalPrio   // 11 CriticalPriority
3611};
3612
3613OSReturn os::set_native_priority(Thread* thread, int newpri) {
3614  OSThread* osthread = thread->osthread();
3615
3616  // Save requested priority in case the thread hasn't been started
3617  osthread->set_native_priority(newpri);
3618
3619  // Check for critical priority request
3620  bool fxcritical = false;
3621  if (newpri == -criticalPrio) {
3622    fxcritical = true;
3623    newpri = criticalPrio;
3624  }
3625
3626  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3627  if (!UseThreadPriorities) return OS_OK;
3628
3629  int status = 0;
3630
3631  if (!fxcritical) {
3632    // Use thr_setprio only if we have a priority that thr_setprio understands
3633    status = thr_setprio(thread->osthread()->thread_id(), newpri);
3634  }
3635
3636  int lwp_status =
3637          set_lwp_class_and_priority(osthread->thread_id(),
3638          osthread->lwp_id(),
3639          newpri,
3640          fxcritical ? fxLimits.schedPolicy : myClass,
3641          !fxcritical);
3642  if (lwp_status != 0 && fxcritical) {
3643    // Try again, this time without changing the scheduling class
3644    newpri = java_MaxPriority_to_os_priority;
3645    lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3646            osthread->lwp_id(),
3647            newpri, myClass, false);
3648  }
3649  status |= lwp_status;
3650  return (status == 0) ? OS_OK : OS_ERR;
3651}
3652
3653
3654OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3655  int p;
3656  if ( !UseThreadPriorities ) {
3657    *priority_ptr = NormalPriority;
3658    return OS_OK;
3659  }
3660  int status = thr_getprio(thread->osthread()->thread_id(), &p);
3661  if (status != 0) {
3662    return OS_ERR;
3663  }
3664  *priority_ptr = p;
3665  return OS_OK;
3666}
3667
3668
3669// Hint to the underlying OS that a task switch would not be good.
3670// Void return because it's a hint and can fail.
3671void os::hint_no_preempt() {
3672  schedctl_start(schedctl_init());
3673}
3674
3675static void resume_clear_context(OSThread *osthread) {
3676  osthread->set_ucontext(NULL);
3677}
3678
3679static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3680  osthread->set_ucontext(context);
3681}
3682
3683static Semaphore sr_semaphore;
3684
3685void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3686  // Save and restore errno to avoid confusing native code with EINTR
3687  // after sigsuspend.
3688  int old_errno = errno;
3689
3690  OSThread* osthread = thread->osthread();
3691  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3692
3693  os::SuspendResume::State current = osthread->sr.state();
3694  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3695    suspend_save_context(osthread, uc);
3696
3697    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3698    os::SuspendResume::State state = osthread->sr.suspended();
3699    if (state == os::SuspendResume::SR_SUSPENDED) {
3700      sigset_t suspend_set;  // signals for sigsuspend()
3701
3702      // get current set of blocked signals and unblock resume signal
3703      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3704      sigdelset(&suspend_set, os::Solaris::SIGasync());
3705
3706      sr_semaphore.signal();
3707      // wait here until we are resumed
3708      while (1) {
3709        sigsuspend(&suspend_set);
3710
3711        os::SuspendResume::State result = osthread->sr.running();
3712        if (result == os::SuspendResume::SR_RUNNING) {
3713          sr_semaphore.signal();
3714          break;
3715        }
3716      }
3717
3718    } else if (state == os::SuspendResume::SR_RUNNING) {
3719      // request was cancelled, continue
3720    } else {
3721      ShouldNotReachHere();
3722    }
3723
3724    resume_clear_context(osthread);
3725  } else if (current == os::SuspendResume::SR_RUNNING) {
3726    // request was cancelled, continue
3727  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3728    // ignore
3729  } else {
3730    // ignore
3731  }
3732
3733  errno = old_errno;
3734}
3735
3736void os::print_statistics() {
3737}
3738
3739int os::message_box(const char* title, const char* message) {
3740  int i;
3741  fdStream err(defaultStream::error_fd());
3742  for (i = 0; i < 78; i++) err.print_raw("=");
3743  err.cr();
3744  err.print_raw_cr(title);
3745  for (i = 0; i < 78; i++) err.print_raw("-");
3746  err.cr();
3747  err.print_raw_cr(message);
3748  for (i = 0; i < 78; i++) err.print_raw("=");
3749  err.cr();
3750
3751  char buf[16];
3752  // Prevent process from exiting upon "read error" without consuming all CPU
3753  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3754
3755  return buf[0] == 'y' || buf[0] == 'Y';
3756}
3757
3758static int sr_notify(OSThread* osthread) {
3759  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3760  assert_status(status == 0, status, "thr_kill");
3761  return status;
3762}
3763
3764// "Randomly" selected value for how long we want to spin
3765// before bailing out on suspending a thread, also how often
3766// we send a signal to a thread we want to resume
3767static const int RANDOMLY_LARGE_INTEGER = 1000000;
3768static const int RANDOMLY_LARGE_INTEGER2 = 100;
3769
3770static bool do_suspend(OSThread* osthread) {
3771  assert(osthread->sr.is_running(), "thread should be running");
3772  assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3773
3774  // mark as suspended and send signal
3775  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3776    // failed to switch, state wasn't running?
3777    ShouldNotReachHere();
3778    return false;
3779  }
3780
3781  if (sr_notify(osthread) != 0) {
3782    ShouldNotReachHere();
3783  }
3784
3785  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3786  while (true) {
3787    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3788      break;
3789    } else {
3790      // timeout
3791      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3792      if (cancelled == os::SuspendResume::SR_RUNNING) {
3793        return false;
3794      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3795        // make sure that we consume the signal on the semaphore as well
3796        sr_semaphore.wait();
3797        break;
3798      } else {
3799        ShouldNotReachHere();
3800        return false;
3801      }
3802    }
3803  }
3804
3805  guarantee(osthread->sr.is_suspended(), "Must be suspended");
3806  return true;
3807}
3808
3809static void do_resume(OSThread* osthread) {
3810  assert(osthread->sr.is_suspended(), "thread should be suspended");
3811  assert(!sr_semaphore.trywait(), "invalid semaphore state");
3812
3813  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3814    // failed to switch to WAKEUP_REQUEST
3815    ShouldNotReachHere();
3816    return;
3817  }
3818
3819  while (true) {
3820    if (sr_notify(osthread) == 0) {
3821      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3822        if (osthread->sr.is_running()) {
3823          return;
3824        }
3825      }
3826    } else {
3827      ShouldNotReachHere();
3828    }
3829  }
3830
3831  guarantee(osthread->sr.is_running(), "Must be running!");
3832}
3833
3834void os::SuspendedThreadTask::internal_do_task() {
3835  if (do_suspend(_thread->osthread())) {
3836    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3837    do_task(context);
3838    do_resume(_thread->osthread());
3839  }
3840}
3841
3842class PcFetcher : public os::SuspendedThreadTask {
3843public:
3844  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3845  ExtendedPC result();
3846protected:
3847  void do_task(const os::SuspendedThreadTaskContext& context);
3848private:
3849  ExtendedPC _epc;
3850};
3851
3852ExtendedPC PcFetcher::result() {
3853  guarantee(is_done(), "task is not done yet.");
3854  return _epc;
3855}
3856
3857void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3858  Thread* thread = context.thread();
3859  OSThread* osthread = thread->osthread();
3860  if (osthread->ucontext() != NULL) {
3861    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3862  } else {
3863    // NULL context is unexpected, double-check this is the VMThread
3864    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3865  }
3866}
3867
3868// A lightweight implementation that does not suspend the target thread and
3869// thus returns only a hint. Used for profiling only!
3870ExtendedPC os::get_thread_pc(Thread* thread) {
3871  // Make sure that it is called by the watcher and the Threads lock is owned.
3872  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3873  // For now, is only used to profile the VM Thread
3874  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3875  PcFetcher fetcher(thread);
3876  fetcher.run();
3877  return fetcher.result();
3878}
3879
3880
3881// This does not do anything on Solaris. This is basically a hook for being
3882// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3883void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
3884  f(value, method, args, thread);
3885}
3886
3887// This routine may be used by user applications as a "hook" to catch signals.
3888// The user-defined signal handler must pass unrecognized signals to this
3889// routine, and if it returns true (non-zero), then the signal handler must
3890// return immediately.  If the flag "abort_if_unrecognized" is true, then this
3891// routine will never retun false (zero), but instead will execute a VM panic
3892// routine kill the process.
3893//
3894// If this routine returns false, it is OK to call it again.  This allows
3895// the user-defined signal handler to perform checks either before or after
3896// the VM performs its own checks.  Naturally, the user code would be making
3897// a serious error if it tried to handle an exception (such as a null check
3898// or breakpoint) that the VM was generating for its own correct operation.
3899//
3900// This routine may recognize any of the following kinds of signals:
3901// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3902// os::Solaris::SIGasync
3903// It should be consulted by handlers for any of those signals.
3904// It explicitly does not recognize os::Solaris::SIGinterrupt
3905//
3906// The caller of this routine must pass in the three arguments supplied
3907// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3908// field of the structure passed to sigaction().  This routine assumes that
3909// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3910//
3911// Note that the VM will print warnings if it detects conflicting signal
3912// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3913//
3914extern "C" JNIEXPORT int
3915JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
3916                          int abort_if_unrecognized);
3917
3918
3919void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3920  int orig_errno = errno;  // Preserve errno value over signal handler.
3921  JVM_handle_solaris_signal(sig, info, ucVoid, true);
3922  errno = orig_errno;
3923}
3924
3925/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
3926   is needed to provoke threads blocked on IO to return an EINTR
3927   Note: this explicitly does NOT call JVM_handle_solaris_signal and
3928   does NOT participate in signal chaining due to requirement for
3929   NOT setting SA_RESTART to make EINTR work. */
3930extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3931   if (UseSignalChaining) {
3932      struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3933      if (actp && actp->sa_handler) {
3934        vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3935      }
3936   }
3937}
3938
3939// This boolean allows users to forward their own non-matching signals
3940// to JVM_handle_solaris_signal, harmlessly.
3941bool os::Solaris::signal_handlers_are_installed = false;
3942
3943// For signal-chaining
3944bool os::Solaris::libjsig_is_loaded = false;
3945typedef struct sigaction *(*get_signal_t)(int);
3946get_signal_t os::Solaris::get_signal_action = NULL;
3947
3948struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3949  struct sigaction *actp = NULL;
3950
3951  if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3952    // Retrieve the old signal handler from libjsig
3953    actp = (*get_signal_action)(sig);
3954  }
3955  if (actp == NULL) {
3956    // Retrieve the preinstalled signal handler from jvm
3957    actp = get_preinstalled_handler(sig);
3958  }
3959
3960  return actp;
3961}
3962
3963static bool call_chained_handler(struct sigaction *actp, int sig,
3964                                 siginfo_t *siginfo, void *context) {
3965  // Call the old signal handler
3966  if (actp->sa_handler == SIG_DFL) {
3967    // It's more reasonable to let jvm treat it as an unexpected exception
3968    // instead of taking the default action.
3969    return false;
3970  } else if (actp->sa_handler != SIG_IGN) {
3971    if ((actp->sa_flags & SA_NODEFER) == 0) {
3972      // automaticlly block the signal
3973      sigaddset(&(actp->sa_mask), sig);
3974    }
3975
3976    sa_handler_t hand;
3977    sa_sigaction_t sa;
3978    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3979    // retrieve the chained handler
3980    if (siginfo_flag_set) {
3981      sa = actp->sa_sigaction;
3982    } else {
3983      hand = actp->sa_handler;
3984    }
3985
3986    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3987      actp->sa_handler = SIG_DFL;
3988    }
3989
3990    // try to honor the signal mask
3991    sigset_t oset;
3992    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3993
3994    // call into the chained handler
3995    if (siginfo_flag_set) {
3996      (*sa)(sig, siginfo, context);
3997    } else {
3998      (*hand)(sig);
3999    }
4000
4001    // restore the signal mask
4002    thr_sigsetmask(SIG_SETMASK, &oset, 0);
4003  }
4004  // Tell jvm's signal handler the signal is taken care of.
4005  return true;
4006}
4007
4008bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4009  bool chained = false;
4010  // signal-chaining
4011  if (UseSignalChaining) {
4012    struct sigaction *actp = get_chained_signal_action(sig);
4013    if (actp != NULL) {
4014      chained = call_chained_handler(actp, sig, siginfo, context);
4015    }
4016  }
4017  return chained;
4018}
4019
4020struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4021  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4022  if (preinstalled_sigs[sig] != 0) {
4023    return &chainedsigactions[sig];
4024  }
4025  return NULL;
4026}
4027
4028void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4029
4030  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4031  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4032  chainedsigactions[sig] = oldAct;
4033  preinstalled_sigs[sig] = 1;
4034}
4035
4036void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4037  // Check for overwrite.
4038  struct sigaction oldAct;
4039  sigaction(sig, (struct sigaction*)NULL, &oldAct);
4040  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4041                                      : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4042  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4043      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4044      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4045    if (AllowUserSignalHandlers || !set_installed) {
4046      // Do not overwrite; user takes responsibility to forward to us.
4047      return;
4048    } else if (UseSignalChaining) {
4049      if (oktochain) {
4050        // save the old handler in jvm
4051        save_preinstalled_handler(sig, oldAct);
4052      } else {
4053        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4054      }
4055      // libjsig also interposes the sigaction() call below and saves the
4056      // old sigaction on it own.
4057    } else {
4058      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4059                    "%#lx for signal %d.", (long)oldhand, sig));
4060    }
4061  }
4062
4063  struct sigaction sigAct;
4064  sigfillset(&(sigAct.sa_mask));
4065  sigAct.sa_handler = SIG_DFL;
4066
4067  sigAct.sa_sigaction = signalHandler;
4068  // Handle SIGSEGV on alternate signal stack if
4069  // not using stack banging
4070  if (!UseStackBanging && sig == SIGSEGV) {
4071    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4072  // Interruptible i/o requires SA_RESTART cleared so EINTR
4073  // is returned instead of restarting system calls
4074  } else if (sig == os::Solaris::SIGinterrupt()) {
4075    sigemptyset(&sigAct.sa_mask);
4076    sigAct.sa_handler = NULL;
4077    sigAct.sa_flags = SA_SIGINFO;
4078    sigAct.sa_sigaction = sigINTRHandler;
4079  } else {
4080    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4081  }
4082  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4083
4084  sigaction(sig, &sigAct, &oldAct);
4085
4086  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4087                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4088  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4089}
4090
4091
4092#define DO_SIGNAL_CHECK(sig) \
4093  if (!sigismember(&check_signal_done, sig)) \
4094    os::Solaris::check_signal_handler(sig)
4095
4096// This method is a periodic task to check for misbehaving JNI applications
4097// under CheckJNI, we can add any periodic checks here
4098
4099void os::run_periodic_checks() {
4100  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4101  // thereby preventing a NULL checks.
4102  if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4103
4104  if (check_signals == false) return;
4105
4106  // SEGV and BUS if overridden could potentially prevent
4107  // generation of hs*.log in the event of a crash, debugging
4108  // such a case can be very challenging, so we absolutely
4109  // check for the following for a good measure:
4110  DO_SIGNAL_CHECK(SIGSEGV);
4111  DO_SIGNAL_CHECK(SIGILL);
4112  DO_SIGNAL_CHECK(SIGFPE);
4113  DO_SIGNAL_CHECK(SIGBUS);
4114  DO_SIGNAL_CHECK(SIGPIPE);
4115  DO_SIGNAL_CHECK(SIGXFSZ);
4116
4117  // ReduceSignalUsage allows the user to override these handlers
4118  // see comments at the very top and jvm_solaris.h
4119  if (!ReduceSignalUsage) {
4120    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4121    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4122    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4123    DO_SIGNAL_CHECK(BREAK_SIGNAL);
4124  }
4125
4126  // See comments above for using JVM1/JVM2 and UseAltSigs
4127  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4128  DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4129
4130}
4131
4132typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4133
4134static os_sigaction_t os_sigaction = NULL;
4135
4136void os::Solaris::check_signal_handler(int sig) {
4137  char buf[O_BUFLEN];
4138  address jvmHandler = NULL;
4139
4140  struct sigaction act;
4141  if (os_sigaction == NULL) {
4142    // only trust the default sigaction, in case it has been interposed
4143    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4144    if (os_sigaction == NULL) return;
4145  }
4146
4147  os_sigaction(sig, (struct sigaction*)NULL, &act);
4148
4149  address thisHandler = (act.sa_flags & SA_SIGINFO)
4150    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4151    : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4152
4153
4154  switch(sig) {
4155    case SIGSEGV:
4156    case SIGBUS:
4157    case SIGFPE:
4158    case SIGPIPE:
4159    case SIGXFSZ:
4160    case SIGILL:
4161      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4162      break;
4163
4164    case SHUTDOWN1_SIGNAL:
4165    case SHUTDOWN2_SIGNAL:
4166    case SHUTDOWN3_SIGNAL:
4167    case BREAK_SIGNAL:
4168      jvmHandler = (address)user_handler();
4169      break;
4170
4171    default:
4172      int intrsig = os::Solaris::SIGinterrupt();
4173      int asynsig = os::Solaris::SIGasync();
4174
4175      if (sig == intrsig) {
4176        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4177      } else if (sig == asynsig) {
4178        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4179      } else {
4180        return;
4181      }
4182      break;
4183  }
4184
4185
4186  if (thisHandler != jvmHandler) {
4187    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4188    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4189    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4190    // No need to check this sig any longer
4191    sigaddset(&check_signal_done, sig);
4192    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4193    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4194      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4195                    exception_name(sig, buf, O_BUFLEN));
4196    }
4197  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4198    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4199    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4200    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4201    // No need to check this sig any longer
4202    sigaddset(&check_signal_done, sig);
4203  }
4204
4205  // Print all the signal handler state
4206  if (sigismember(&check_signal_done, sig)) {
4207    print_signal_handlers(tty, buf, O_BUFLEN);
4208  }
4209
4210}
4211
4212void os::Solaris::install_signal_handlers() {
4213  bool libjsigdone = false;
4214  signal_handlers_are_installed = true;
4215
4216  // signal-chaining
4217  typedef void (*signal_setting_t)();
4218  signal_setting_t begin_signal_setting = NULL;
4219  signal_setting_t end_signal_setting = NULL;
4220  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4221                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4222  if (begin_signal_setting != NULL) {
4223    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4224                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4225    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4226                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4227    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4228                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4229    libjsig_is_loaded = true;
4230    if (os::Solaris::get_libjsig_version != NULL) {
4231      libjsigversion =  (*os::Solaris::get_libjsig_version)();
4232    }
4233    assert(UseSignalChaining, "should enable signal-chaining");
4234  }
4235  if (libjsig_is_loaded) {
4236    // Tell libjsig jvm is setting signal handlers
4237    (*begin_signal_setting)();
4238  }
4239
4240  set_signal_handler(SIGSEGV, true, true);
4241  set_signal_handler(SIGPIPE, true, true);
4242  set_signal_handler(SIGXFSZ, true, true);
4243  set_signal_handler(SIGBUS, true, true);
4244  set_signal_handler(SIGILL, true, true);
4245  set_signal_handler(SIGFPE, true, true);
4246
4247
4248  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4249
4250    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
4251    // can not register overridable signals which might be > 32
4252    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4253    // Tell libjsig jvm has finished setting signal handlers
4254      (*end_signal_setting)();
4255      libjsigdone = true;
4256    }
4257  }
4258
4259  // Never ok to chain our SIGinterrupt
4260  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4261  set_signal_handler(os::Solaris::SIGasync(), true, true);
4262
4263  if (libjsig_is_loaded && !libjsigdone) {
4264    // Tell libjsig jvm finishes setting signal handlers
4265    (*end_signal_setting)();
4266  }
4267
4268  // We don't activate signal checker if libjsig is in place, we trust ourselves
4269  // and if UserSignalHandler is installed all bets are off.
4270  // Log that signal checking is off only if -verbose:jni is specified.
4271  if (CheckJNICalls) {
4272    if (libjsig_is_loaded) {
4273      if (PrintJNIResolving) {
4274        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4275      }
4276      check_signals = false;
4277    }
4278    if (AllowUserSignalHandlers) {
4279      if (PrintJNIResolving) {
4280        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4281      }
4282      check_signals = false;
4283    }
4284  }
4285}
4286
4287
4288void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4289
4290const char * signames[] = {
4291  "SIG0",
4292  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4293  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4294  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4295  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4296  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4297  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4298  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4299  "SIGCANCEL", "SIGLOST"
4300};
4301
4302const char* os::exception_name(int exception_code, char* buf, size_t size) {
4303  if (0 < exception_code && exception_code <= SIGRTMAX) {
4304    // signal
4305    if (exception_code < sizeof(signames)/sizeof(const char*)) {
4306       jio_snprintf(buf, size, "%s", signames[exception_code]);
4307    } else {
4308       jio_snprintf(buf, size, "SIG%d", exception_code);
4309    }
4310    return buf;
4311  } else {
4312    return NULL;
4313  }
4314}
4315
4316// (Static) wrapper for getisax(2) call.
4317os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4318
4319// (Static) wrappers for the liblgrp API
4320os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4321os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4322os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4323os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4324os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4325os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4326os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4327os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4328os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4329
4330// (Static) wrapper for meminfo() call.
4331os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4332
4333static address resolve_symbol_lazy(const char* name) {
4334  address addr = (address) dlsym(RTLD_DEFAULT, name);
4335  if(addr == NULL) {
4336    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4337    addr = (address) dlsym(RTLD_NEXT, name);
4338  }
4339  return addr;
4340}
4341
4342static address resolve_symbol(const char* name) {
4343  address addr = resolve_symbol_lazy(name);
4344  if(addr == NULL) {
4345    fatal(dlerror());
4346  }
4347  return addr;
4348}
4349
4350void os::Solaris::libthread_init() {
4351  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4352
4353  lwp_priocntl_init();
4354
4355  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4356  if(func == NULL) {
4357    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4358    // Guarantee that this VM is running on an new enough OS (5.6 or
4359    // later) that it will have a new enough libthread.so.
4360    guarantee(func != NULL, "libthread.so is too old.");
4361  }
4362
4363  int size;
4364  void (*handler_info_func)(address *, int *);
4365  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4366  handler_info_func(&handler_start, &size);
4367  handler_end = handler_start + size;
4368}
4369
4370
4371int_fnP_mutex_tP os::Solaris::_mutex_lock;
4372int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4373int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4374int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4375int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4376int os::Solaris::_mutex_scope = USYNC_THREAD;
4377
4378int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4379int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4380int_fnP_cond_tP os::Solaris::_cond_signal;
4381int_fnP_cond_tP os::Solaris::_cond_broadcast;
4382int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4383int_fnP_cond_tP os::Solaris::_cond_destroy;
4384int os::Solaris::_cond_scope = USYNC_THREAD;
4385
4386void os::Solaris::synchronization_init() {
4387  if(UseLWPSynchronization) {
4388    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4389    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4390    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4391    os::Solaris::set_mutex_init(lwp_mutex_init);
4392    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4393    os::Solaris::set_mutex_scope(USYNC_THREAD);
4394
4395    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4396    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4397    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4398    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4399    os::Solaris::set_cond_init(lwp_cond_init);
4400    os::Solaris::set_cond_destroy(lwp_cond_destroy);
4401    os::Solaris::set_cond_scope(USYNC_THREAD);
4402  }
4403  else {
4404    os::Solaris::set_mutex_scope(USYNC_THREAD);
4405    os::Solaris::set_cond_scope(USYNC_THREAD);
4406
4407    if(UsePthreads) {
4408      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4409      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4410      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4411      os::Solaris::set_mutex_init(pthread_mutex_default_init);
4412      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4413
4414      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4415      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4416      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4417      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4418      os::Solaris::set_cond_init(pthread_cond_default_init);
4419      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4420    }
4421    else {
4422      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4423      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4424      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4425      os::Solaris::set_mutex_init(::mutex_init);
4426      os::Solaris::set_mutex_destroy(::mutex_destroy);
4427
4428      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4429      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4430      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4431      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4432      os::Solaris::set_cond_init(::cond_init);
4433      os::Solaris::set_cond_destroy(::cond_destroy);
4434    }
4435  }
4436}
4437
4438bool os::Solaris::liblgrp_init() {
4439  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4440  if (handle != NULL) {
4441    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4442    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4443    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4444    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4445    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4446    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4447    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4448    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4449                                       dlsym(handle, "lgrp_cookie_stale")));
4450
4451    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4452    set_lgrp_cookie(c);
4453    return true;
4454  }
4455  return false;
4456}
4457
4458void os::Solaris::misc_sym_init() {
4459  address func;
4460
4461  // getisax
4462  func = resolve_symbol_lazy("getisax");
4463  if (func != NULL) {
4464    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4465  }
4466
4467  // meminfo
4468  func = resolve_symbol_lazy("meminfo");
4469  if (func != NULL) {
4470    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4471  }
4472}
4473
4474uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4475  assert(_getisax != NULL, "_getisax not set");
4476  return _getisax(array, n);
4477}
4478
4479// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4480typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4481static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4482
4483void init_pset_getloadavg_ptr(void) {
4484  pset_getloadavg_ptr =
4485    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4486  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4487    warning("pset_getloadavg function not found");
4488  }
4489}
4490
4491int os::Solaris::_dev_zero_fd = -1;
4492
4493// this is called _before_ the global arguments have been parsed
4494void os::init(void) {
4495  _initial_pid = getpid();
4496
4497  max_hrtime = first_hrtime = gethrtime();
4498
4499  init_random(1234567);
4500
4501  page_size = sysconf(_SC_PAGESIZE);
4502  if (page_size == -1)
4503    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4504                  strerror(errno)));
4505  init_page_sizes((size_t) page_size);
4506
4507  Solaris::initialize_system_info();
4508
4509  // Initialize misc. symbols as soon as possible, so we can use them
4510  // if we need them.
4511  Solaris::misc_sym_init();
4512
4513  int fd = ::open("/dev/zero", O_RDWR);
4514  if (fd < 0) {
4515    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4516  } else {
4517    Solaris::set_dev_zero_fd(fd);
4518
4519    // Close on exec, child won't inherit.
4520    fcntl(fd, F_SETFD, FD_CLOEXEC);
4521  }
4522
4523  clock_tics_per_sec = CLK_TCK;
4524
4525  // check if dladdr1() exists; dladdr1 can provide more information than
4526  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4527  // and is available on linker patches for 5.7 and 5.8.
4528  // libdl.so must have been loaded, this call is just an entry lookup
4529  void * hdl = dlopen("libdl.so", RTLD_NOW);
4530  if (hdl)
4531    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4532
4533  // (Solaris only) this switches to calls that actually do locking.
4534  ThreadCritical::initialize();
4535
4536  main_thread = thr_self();
4537
4538  // Constant minimum stack size allowed. It must be at least
4539  // the minimum of what the OS supports (thr_min_stack()), and
4540  // enough to allow the thread to get to user bytecode execution.
4541  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4542  // If the pagesize of the VM is greater than 8K determine the appropriate
4543  // number of initial guard pages.  The user can change this with the
4544  // command line arguments, if needed.
4545  if (vm_page_size() > 8*K) {
4546    StackYellowPages = 1;
4547    StackRedPages = 1;
4548    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4549  }
4550}
4551
4552// To install functions for atexit system call
4553extern "C" {
4554  static void perfMemory_exit_helper() {
4555    perfMemory_exit();
4556  }
4557}
4558
4559// this is called _after_ the global arguments have been parsed
4560jint os::init_2(void) {
4561  // try to enable extended file IO ASAP, see 6431278
4562  os::Solaris::try_enable_extended_io();
4563
4564  // Allocate a single page and mark it as readable for safepoint polling.  Also
4565  // use this first mmap call to check support for MAP_ALIGN.
4566  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4567                                                      page_size,
4568                                                      MAP_PRIVATE | MAP_ALIGN,
4569                                                      PROT_READ);
4570  if (polling_page == NULL) {
4571    has_map_align = false;
4572    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4573                                                PROT_READ);
4574  }
4575
4576  os::set_polling_page(polling_page);
4577
4578#ifndef PRODUCT
4579  if( Verbose && PrintMiscellaneous )
4580    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4581#endif
4582
4583  if (!UseMembar) {
4584    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4585    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4586    os::set_memory_serialize_page( mem_serialize_page );
4587
4588#ifndef PRODUCT
4589    if(Verbose && PrintMiscellaneous)
4590      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4591#endif
4592  }
4593
4594  // Check minimum allowable stack size for thread creation and to initialize
4595  // the java system classes, including StackOverflowError - depends on page
4596  // size.  Add a page for compiler2 recursion in main thread.
4597  // Add in 2*BytesPerWord times page size to account for VM stack during
4598  // class initialization depending on 32 or 64 bit VM.
4599  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4600            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4601                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4602
4603  size_t threadStackSizeInBytes = ThreadStackSize * K;
4604  if (threadStackSizeInBytes != 0 &&
4605    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4606    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
4607                  os::Solaris::min_stack_allowed/K);
4608    return JNI_ERR;
4609  }
4610
4611  // For 64kbps there will be a 64kb page size, which makes
4612  // the usable default stack size quite a bit less.  Increase the
4613  // stack for 64kb (or any > than 8kb) pages, this increases
4614  // virtual memory fragmentation (since we're not creating the
4615  // stack on a power of 2 boundary.  The real fix for this
4616  // should be to fix the guard page mechanism.
4617
4618  if (vm_page_size() > 8*K) {
4619      threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4620         ? threadStackSizeInBytes +
4621           ((StackYellowPages + StackRedPages) * vm_page_size())
4622         : 0;
4623      ThreadStackSize = threadStackSizeInBytes/K;
4624  }
4625
4626  // Make the stack size a multiple of the page size so that
4627  // the yellow/red zones can be guarded.
4628  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4629        vm_page_size()));
4630
4631  Solaris::libthread_init();
4632
4633  if (UseNUMA) {
4634    if (!Solaris::liblgrp_init()) {
4635      UseNUMA = false;
4636    } else {
4637      size_t lgrp_limit = os::numa_get_groups_num();
4638      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4639      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4640      FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4641      if (lgrp_num < 2) {
4642        // There's only one locality group, disable NUMA.
4643        UseNUMA = false;
4644      }
4645    }
4646    if (!UseNUMA && ForceNUMA) {
4647      UseNUMA = true;
4648    }
4649  }
4650
4651  Solaris::signal_sets_init();
4652  Solaris::init_signal_mem();
4653  Solaris::install_signal_handlers();
4654
4655  if (libjsigversion < JSIG_VERSION_1_4_1) {
4656    Maxlibjsigsigs = OLDMAXSIGNUM;
4657  }
4658
4659  // initialize synchronization primitives to use either thread or
4660  // lwp synchronization (controlled by UseLWPSynchronization)
4661  Solaris::synchronization_init();
4662
4663  if (MaxFDLimit) {
4664    // set the number of file descriptors to max. print out error
4665    // if getrlimit/setrlimit fails but continue regardless.
4666    struct rlimit nbr_files;
4667    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4668    if (status != 0) {
4669      if (PrintMiscellaneous && (Verbose || WizardMode))
4670        perror("os::init_2 getrlimit failed");
4671    } else {
4672      nbr_files.rlim_cur = nbr_files.rlim_max;
4673      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4674      if (status != 0) {
4675        if (PrintMiscellaneous && (Verbose || WizardMode))
4676          perror("os::init_2 setrlimit failed");
4677      }
4678    }
4679  }
4680
4681  // Calculate theoretical max. size of Threads to guard gainst
4682  // artifical out-of-memory situations, where all available address-
4683  // space has been reserved by thread stacks. Default stack size is 1Mb.
4684  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4685    JavaThread::stack_size_at_create() : (1*K*K);
4686  assert(pre_thread_stack_size != 0, "Must have a stack");
4687  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
4688  // we should start doing Virtual Memory banging. Currently when the threads will
4689  // have used all but 200Mb of space.
4690  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4691  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4692
4693  // at-exit methods are called in the reverse order of their registration.
4694  // In Solaris 7 and earlier, atexit functions are called on return from
4695  // main or as a result of a call to exit(3C). There can be only 32 of
4696  // these functions registered and atexit() does not set errno. In Solaris
4697  // 8 and later, there is no limit to the number of functions registered
4698  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4699  // functions are called upon dlclose(3DL) in addition to return from main
4700  // and exit(3C).
4701
4702  if (PerfAllowAtExitRegistration) {
4703    // only register atexit functions if PerfAllowAtExitRegistration is set.
4704    // atexit functions can be delayed until process exit time, which
4705    // can be problematic for embedded VM situations. Embedded VMs should
4706    // call DestroyJavaVM() to assure that VM resources are released.
4707
4708    // note: perfMemory_exit_helper atexit function may be removed in
4709    // the future if the appropriate cleanup code can be added to the
4710    // VM_Exit VMOperation's doit method.
4711    if (atexit(perfMemory_exit_helper) != 0) {
4712      warning("os::init2 atexit(perfMemory_exit_helper) failed");
4713    }
4714  }
4715
4716  // Init pset_loadavg function pointer
4717  init_pset_getloadavg_ptr();
4718
4719  return JNI_OK;
4720}
4721
4722void os::init_3(void) {
4723  return;
4724}
4725
4726// Mark the polling page as unreadable
4727void os::make_polling_page_unreadable(void) {
4728  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
4729    fatal("Could not disable polling page");
4730};
4731
4732// Mark the polling page as readable
4733void os::make_polling_page_readable(void) {
4734  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
4735    fatal("Could not enable polling page");
4736};
4737
4738// OS interface.
4739
4740bool os::check_heap(bool force) { return true; }
4741
4742typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
4743static vsnprintf_t sol_vsnprintf = NULL;
4744
4745int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
4746  if (!sol_vsnprintf) {
4747    //search  for the named symbol in the objects that were loaded after libjvm
4748    void* where = RTLD_NEXT;
4749    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4750        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4751    if (!sol_vsnprintf){
4752      //search  for the named symbol in the objects that were loaded before libjvm
4753      where = RTLD_DEFAULT;
4754      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4755        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4756      assert(sol_vsnprintf != NULL, "vsnprintf not found");
4757    }
4758  }
4759  return (*sol_vsnprintf)(buf, count, fmt, argptr);
4760}
4761
4762
4763// Is a (classpath) directory empty?
4764bool os::dir_is_empty(const char* path) {
4765  DIR *dir = NULL;
4766  struct dirent *ptr;
4767
4768  dir = opendir(path);
4769  if (dir == NULL) return true;
4770
4771  /* Scan the directory */
4772  bool result = true;
4773  char buf[sizeof(struct dirent) + MAX_PATH];
4774  struct dirent *dbuf = (struct dirent *) buf;
4775  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4776    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4777      result = false;
4778    }
4779  }
4780  closedir(dir);
4781  return result;
4782}
4783
4784// This code originates from JDK's sysOpen and open64_w
4785// from src/solaris/hpi/src/system_md.c
4786
4787#ifndef O_DELETE
4788#define O_DELETE 0x10000
4789#endif
4790
4791// Open a file. Unlink the file immediately after open returns
4792// if the specified oflag has the O_DELETE flag set.
4793// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4794
4795int os::open(const char *path, int oflag, int mode) {
4796  if (strlen(path) > MAX_PATH - 1) {
4797    errno = ENAMETOOLONG;
4798    return -1;
4799  }
4800  int fd;
4801  int o_delete = (oflag & O_DELETE);
4802  oflag = oflag & ~O_DELETE;
4803
4804  fd = ::open64(path, oflag, mode);
4805  if (fd == -1) return -1;
4806
4807  //If the open succeeded, the file might still be a directory
4808  {
4809    struct stat64 buf64;
4810    int ret = ::fstat64(fd, &buf64);
4811    int st_mode = buf64.st_mode;
4812
4813    if (ret != -1) {
4814      if ((st_mode & S_IFMT) == S_IFDIR) {
4815        errno = EISDIR;
4816        ::close(fd);
4817        return -1;
4818      }
4819    } else {
4820      ::close(fd);
4821      return -1;
4822    }
4823  }
4824    /*
4825     * 32-bit Solaris systems suffer from:
4826     *
4827     * - an historical default soft limit of 256 per-process file
4828     *   descriptors that is too low for many Java programs.
4829     *
4830     * - a design flaw where file descriptors created using stdio
4831     *   fopen must be less than 256, _even_ when the first limit above
4832     *   has been raised.  This can cause calls to fopen (but not calls to
4833     *   open, for example) to fail mysteriously, perhaps in 3rd party
4834     *   native code (although the JDK itself uses fopen).  One can hardly
4835     *   criticize them for using this most standard of all functions.
4836     *
4837     * We attempt to make everything work anyways by:
4838     *
4839     * - raising the soft limit on per-process file descriptors beyond
4840     *   256
4841     *
4842     * - As of Solaris 10u4, we can request that Solaris raise the 256
4843     *   stdio fopen limit by calling function enable_extended_FILE_stdio.
4844     *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4845     *
4846     * - If we are stuck on an old (pre 10u4) Solaris system, we can
4847     *   workaround the bug by remapping non-stdio file descriptors below
4848     *   256 to ones beyond 256, which is done below.
4849     *
4850     * See:
4851     * 1085341: 32-bit stdio routines should support file descriptors >255
4852     * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4853     * 6431278: Netbeans crash on 32 bit Solaris: need to call
4854     *          enable_extended_FILE_stdio() in VM initialisation
4855     * Giri Mandalika's blog
4856     * http://technopark02.blogspot.com/2005_05_01_archive.html
4857     */
4858#ifndef  _LP64
4859     if ((!enabled_extended_FILE_stdio) && fd < 256) {
4860         int newfd = ::fcntl(fd, F_DUPFD, 256);
4861         if (newfd != -1) {
4862             ::close(fd);
4863             fd = newfd;
4864         }
4865     }
4866#endif // 32-bit Solaris
4867    /*
4868     * All file descriptors that are opened in the JVM and not
4869     * specifically destined for a subprocess should have the
4870     * close-on-exec flag set.  If we don't set it, then careless 3rd
4871     * party native code might fork and exec without closing all
4872     * appropriate file descriptors (e.g. as we do in closeDescriptors in
4873     * UNIXProcess.c), and this in turn might:
4874     *
4875     * - cause end-of-file to fail to be detected on some file
4876     *   descriptors, resulting in mysterious hangs, or
4877     *
4878     * - might cause an fopen in the subprocess to fail on a system
4879     *   suffering from bug 1085341.
4880     *
4881     * (Yes, the default setting of the close-on-exec flag is a Unix
4882     * design flaw)
4883     *
4884     * See:
4885     * 1085341: 32-bit stdio routines should support file descriptors >255
4886     * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4887     * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4888     */
4889#ifdef FD_CLOEXEC
4890    {
4891        int flags = ::fcntl(fd, F_GETFD);
4892        if (flags != -1)
4893            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4894    }
4895#endif
4896
4897  if (o_delete != 0) {
4898    ::unlink(path);
4899  }
4900  return fd;
4901}
4902
4903// create binary file, rewriting existing file if required
4904int os::create_binary_file(const char* path, bool rewrite_existing) {
4905  int oflags = O_WRONLY | O_CREAT;
4906  if (!rewrite_existing) {
4907    oflags |= O_EXCL;
4908  }
4909  return ::open64(path, oflags, S_IREAD | S_IWRITE);
4910}
4911
4912// return current position of file pointer
4913jlong os::current_file_offset(int fd) {
4914  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4915}
4916
4917// move file pointer to the specified offset
4918jlong os::seek_to_file_offset(int fd, jlong offset) {
4919  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4920}
4921
4922jlong os::lseek(int fd, jlong offset, int whence) {
4923  return (jlong) ::lseek64(fd, offset, whence);
4924}
4925
4926char * os::native_path(char *path) {
4927  return path;
4928}
4929
4930int os::ftruncate(int fd, jlong length) {
4931  return ::ftruncate64(fd, length);
4932}
4933
4934int os::fsync(int fd)  {
4935  RESTARTABLE_RETURN_INT(::fsync(fd));
4936}
4937
4938int os::available(int fd, jlong *bytes) {
4939  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4940          "Assumed _thread_in_native");
4941  jlong cur, end;
4942  int mode;
4943  struct stat64 buf64;
4944
4945  if (::fstat64(fd, &buf64) >= 0) {
4946    mode = buf64.st_mode;
4947    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4948      int n,ioctl_return;
4949
4950      RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4951      if (ioctl_return>= 0) {
4952          *bytes = n;
4953        return 1;
4954      }
4955    }
4956  }
4957  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4958    return 0;
4959  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4960    return 0;
4961  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4962    return 0;
4963  }
4964  *bytes = end - cur;
4965  return 1;
4966}
4967
4968// Map a block of memory.
4969char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4970                     char *addr, size_t bytes, bool read_only,
4971                     bool allow_exec) {
4972  int prot;
4973  int flags;
4974
4975  if (read_only) {
4976    prot = PROT_READ;
4977    flags = MAP_SHARED;
4978  } else {
4979    prot = PROT_READ | PROT_WRITE;
4980    flags = MAP_PRIVATE;
4981  }
4982
4983  if (allow_exec) {
4984    prot |= PROT_EXEC;
4985  }
4986
4987  if (addr != NULL) {
4988    flags |= MAP_FIXED;
4989  }
4990
4991  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4992                                     fd, file_offset);
4993  if (mapped_address == MAP_FAILED) {
4994    return NULL;
4995  }
4996  return mapped_address;
4997}
4998
4999
5000// Remap a block of memory.
5001char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5002                       char *addr, size_t bytes, bool read_only,
5003                       bool allow_exec) {
5004  // same as map_memory() on this OS
5005  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5006                        allow_exec);
5007}
5008
5009
5010// Unmap a block of memory.
5011bool os::pd_unmap_memory(char* addr, size_t bytes) {
5012  return munmap(addr, bytes) == 0;
5013}
5014
5015void os::pause() {
5016  char filename[MAX_PATH];
5017  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5018    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5019  } else {
5020    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5021  }
5022
5023  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5024  if (fd != -1) {
5025    struct stat buf;
5026    ::close(fd);
5027    while (::stat(filename, &buf) == 0) {
5028      (void)::poll(NULL, 0, 100);
5029    }
5030  } else {
5031    jio_fprintf(stderr,
5032      "Could not open pause file '%s', continuing immediately.\n", filename);
5033  }
5034}
5035
5036#ifndef PRODUCT
5037#ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5038// Turn this on if you need to trace synch operations.
5039// Set RECORD_SYNCH_LIMIT to a large-enough value,
5040// and call record_synch_enable and record_synch_disable
5041// around the computation of interest.
5042
5043void record_synch(char* name, bool returning);  // defined below
5044
5045class RecordSynch {
5046  char* _name;
5047 public:
5048  RecordSynch(char* name) :_name(name)
5049                 { record_synch(_name, false); }
5050  ~RecordSynch() { record_synch(_name,   true);  }
5051};
5052
5053#define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5054extern "C" ret name params {                                    \
5055  typedef ret name##_t params;                                  \
5056  static name##_t* implem = NULL;                               \
5057  static int callcount = 0;                                     \
5058  if (implem == NULL) {                                         \
5059    implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5060    if (implem == NULL)  fatal(dlerror());                      \
5061  }                                                             \
5062  ++callcount;                                                  \
5063  RecordSynch _rs(#name);                                       \
5064  inner;                                                        \
5065  return implem args;                                           \
5066}
5067// in dbx, examine callcounts this way:
5068// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5069
5070#define CHECK_POINTER_OK(p) \
5071  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5072#define CHECK_MU \
5073  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5074#define CHECK_CV \
5075  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5076#define CHECK_P(p) \
5077  if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5078
5079#define CHECK_MUTEX(mutex_op) \
5080CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5081
5082CHECK_MUTEX(   mutex_lock)
5083CHECK_MUTEX(  _mutex_lock)
5084CHECK_MUTEX( mutex_unlock)
5085CHECK_MUTEX(_mutex_unlock)
5086CHECK_MUTEX( mutex_trylock)
5087CHECK_MUTEX(_mutex_trylock)
5088
5089#define CHECK_COND(cond_op) \
5090CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5091
5092CHECK_COND( cond_wait);
5093CHECK_COND(_cond_wait);
5094CHECK_COND(_cond_wait_cancel);
5095
5096#define CHECK_COND2(cond_op) \
5097CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5098
5099CHECK_COND2( cond_timedwait);
5100CHECK_COND2(_cond_timedwait);
5101CHECK_COND2(_cond_timedwait_cancel);
5102
5103// do the _lwp_* versions too
5104#define mutex_t lwp_mutex_t
5105#define cond_t  lwp_cond_t
5106CHECK_MUTEX(  _lwp_mutex_lock)
5107CHECK_MUTEX(  _lwp_mutex_unlock)
5108CHECK_MUTEX(  _lwp_mutex_trylock)
5109CHECK_MUTEX( __lwp_mutex_lock)
5110CHECK_MUTEX( __lwp_mutex_unlock)
5111CHECK_MUTEX( __lwp_mutex_trylock)
5112CHECK_MUTEX(___lwp_mutex_lock)
5113CHECK_MUTEX(___lwp_mutex_unlock)
5114
5115CHECK_COND(  _lwp_cond_wait);
5116CHECK_COND( __lwp_cond_wait);
5117CHECK_COND(___lwp_cond_wait);
5118
5119CHECK_COND2(  _lwp_cond_timedwait);
5120CHECK_COND2( __lwp_cond_timedwait);
5121#undef mutex_t
5122#undef cond_t
5123
5124CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5125CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5126CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5127CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5128CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5129CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5130CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5131CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5132
5133
5134// recording machinery:
5135
5136enum { RECORD_SYNCH_LIMIT = 200 };
5137char* record_synch_name[RECORD_SYNCH_LIMIT];
5138void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5139bool record_synch_returning[RECORD_SYNCH_LIMIT];
5140thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5141int record_synch_count = 0;
5142bool record_synch_enabled = false;
5143
5144// in dbx, examine recorded data this way:
5145// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5146
5147void record_synch(char* name, bool returning) {
5148  if (record_synch_enabled) {
5149    if (record_synch_count < RECORD_SYNCH_LIMIT) {
5150      record_synch_name[record_synch_count] = name;
5151      record_synch_returning[record_synch_count] = returning;
5152      record_synch_thread[record_synch_count] = thr_self();
5153      record_synch_arg0ptr[record_synch_count] = &name;
5154      record_synch_count++;
5155    }
5156    // put more checking code here:
5157    // ...
5158  }
5159}
5160
5161void record_synch_enable() {
5162  // start collecting trace data, if not already doing so
5163  if (!record_synch_enabled)  record_synch_count = 0;
5164  record_synch_enabled = true;
5165}
5166
5167void record_synch_disable() {
5168  // stop collecting trace data
5169  record_synch_enabled = false;
5170}
5171
5172#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5173#endif // PRODUCT
5174
5175const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5176const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5177                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5178
5179
5180// JVMTI & JVM monitoring and management support
5181// The thread_cpu_time() and current_thread_cpu_time() are only
5182// supported if is_thread_cpu_time_supported() returns true.
5183// They are not supported on Solaris T1.
5184
5185// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5186// are used by JVM M&M and JVMTI to get user+sys or user CPU time
5187// of a thread.
5188//
5189// current_thread_cpu_time() and thread_cpu_time(Thread *)
5190// returns the fast estimate available on the platform.
5191
5192// hrtime_t gethrvtime() return value includes
5193// user time but does not include system time
5194jlong os::current_thread_cpu_time() {
5195  return (jlong) gethrvtime();
5196}
5197
5198jlong os::thread_cpu_time(Thread *thread) {
5199  // return user level CPU time only to be consistent with
5200  // what current_thread_cpu_time returns.
5201  // thread_cpu_time_info() must be changed if this changes
5202  return os::thread_cpu_time(thread, false /* user time only */);
5203}
5204
5205jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5206  if (user_sys_cpu_time) {
5207    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5208  } else {
5209    return os::current_thread_cpu_time();
5210  }
5211}
5212
5213jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5214  char proc_name[64];
5215  int count;
5216  prusage_t prusage;
5217  jlong lwp_time;
5218  int fd;
5219
5220  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5221                     getpid(),
5222                     thread->osthread()->lwp_id());
5223  fd = ::open(proc_name, O_RDONLY);
5224  if ( fd == -1 ) return -1;
5225
5226  do {
5227    count = ::pread(fd,
5228                  (void *)&prusage.pr_utime,
5229                  thr_time_size,
5230                  thr_time_off);
5231  } while (count < 0 && errno == EINTR);
5232  ::close(fd);
5233  if ( count < 0 ) return -1;
5234
5235  if (user_sys_cpu_time) {
5236    // user + system CPU time
5237    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5238                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5239                 (jlong)prusage.pr_stime.tv_nsec +
5240                 (jlong)prusage.pr_utime.tv_nsec;
5241  } else {
5242    // user level CPU time only
5243    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5244                (jlong)prusage.pr_utime.tv_nsec;
5245  }
5246
5247  return(lwp_time);
5248}
5249
5250void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5251  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5252  info_ptr->may_skip_backward = false;    // elapsed time not wall time
5253  info_ptr->may_skip_forward = false;     // elapsed time not wall time
5254  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5255}
5256
5257void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5258  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5259  info_ptr->may_skip_backward = false;    // elapsed time not wall time
5260  info_ptr->may_skip_forward = false;     // elapsed time not wall time
5261  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5262}
5263
5264bool os::is_thread_cpu_time_supported() {
5265  return true;
5266}
5267
5268// System loadavg support.  Returns -1 if load average cannot be obtained.
5269// Return the load average for our processor set if the primitive exists
5270// (Solaris 9 and later).  Otherwise just return system wide loadavg.
5271int os::loadavg(double loadavg[], int nelem) {
5272  if (pset_getloadavg_ptr != NULL) {
5273    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5274  } else {
5275    return ::getloadavg(loadavg, nelem);
5276  }
5277}
5278
5279//---------------------------------------------------------------------------------
5280
5281bool os::find(address addr, outputStream* st) {
5282  Dl_info dlinfo;
5283  memset(&dlinfo, 0, sizeof(dlinfo));
5284  if (dladdr(addr, &dlinfo) != 0) {
5285    st->print(PTR_FORMAT ": ", addr);
5286    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5287      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5288    } else if (dlinfo.dli_fbase != NULL)
5289      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5290    else
5291      st->print("<absolute address>");
5292    if (dlinfo.dli_fname != NULL) {
5293      st->print(" in %s", dlinfo.dli_fname);
5294    }
5295    if (dlinfo.dli_fbase != NULL) {
5296      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5297    }
5298    st->cr();
5299
5300    if (Verbose) {
5301      // decode some bytes around the PC
5302      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5303      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5304      address       lowest = (address) dlinfo.dli_sname;
5305      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5306      if (begin < lowest)  begin = lowest;
5307      Dl_info dlinfo2;
5308      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5309          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5310        end = (address) dlinfo2.dli_saddr;
5311      Disassembler::decode(begin, end, st);
5312    }
5313    return true;
5314  }
5315  return false;
5316}
5317
5318// Following function has been added to support HotSparc's libjvm.so running
5319// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5320// src/solaris/hpi/native_threads in the EVM codebase.
5321//
5322// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5323// libraries and should thus be removed. We will leave it behind for a while
5324// until we no longer want to able to run on top of 1.3.0 Solaris production
5325// JDK. See 4341971.
5326
5327#define STACK_SLACK 0x800
5328
5329extern "C" {
5330  intptr_t sysThreadAvailableStackWithSlack() {
5331    stack_t st;
5332    intptr_t retval, stack_top;
5333    retval = thr_stksegment(&st);
5334    assert(retval == 0, "incorrect return value from thr_stksegment");
5335    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5336    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5337    stack_top=(intptr_t)st.ss_sp-st.ss_size;
5338    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5339  }
5340}
5341
5342// ObjectMonitor park-unpark infrastructure ...
5343//
5344// We implement Solaris and Linux PlatformEvents with the
5345// obvious condvar-mutex-flag triple.
5346// Another alternative that works quite well is pipes:
5347// Each PlatformEvent consists of a pipe-pair.
5348// The thread associated with the PlatformEvent
5349// calls park(), which reads from the input end of the pipe.
5350// Unpark() writes into the other end of the pipe.
5351// The write-side of the pipe must be set NDELAY.
5352// Unfortunately pipes consume a large # of handles.
5353// Native solaris lwp_park() and lwp_unpark() work nicely, too.
5354// Using pipes for the 1st few threads might be workable, however.
5355//
5356// park() is permitted to return spuriously.
5357// Callers of park() should wrap the call to park() in
5358// an appropriate loop.  A litmus test for the correct
5359// usage of park is the following: if park() were modified
5360// to immediately return 0 your code should still work,
5361// albeit degenerating to a spin loop.
5362//
5363// An interesting optimization for park() is to use a trylock()
5364// to attempt to acquire the mutex.  If the trylock() fails
5365// then we know that a concurrent unpark() operation is in-progress.
5366// in that case the park() code could simply set _count to 0
5367// and return immediately.  The subsequent park() operation *might*
5368// return immediately.  That's harmless as the caller of park() is
5369// expected to loop.  By using trylock() we will have avoided a
5370// avoided a context switch caused by contention on the per-thread mutex.
5371//
5372// TODO-FIXME:
5373// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5374//     objectmonitor implementation.
5375// 2.  Collapse the JSR166 parker event, and the
5376//     objectmonitor ParkEvent into a single "Event" construct.
5377// 3.  In park() and unpark() add:
5378//     assert (Thread::current() == AssociatedWith).
5379// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5380//     1-out-of-N park() operations will return immediately.
5381//
5382// _Event transitions in park()
5383//   -1 => -1 : illegal
5384//    1 =>  0 : pass - return immediately
5385//    0 => -1 : block
5386//
5387// _Event serves as a restricted-range semaphore.
5388//
5389// Another possible encoding of _Event would be with
5390// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5391//
5392// TODO-FIXME: add DTRACE probes for:
5393// 1.   Tx parks
5394// 2.   Ty unparks Tx
5395// 3.   Tx resumes from park
5396
5397
5398// value determined through experimentation
5399#define ROUNDINGFIX 11
5400
5401// utility to compute the abstime argument to timedwait.
5402// TODO-FIXME: switch from compute_abstime() to unpackTime().
5403
5404static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5405  // millis is the relative timeout time
5406  // abstime will be the absolute timeout time
5407  if (millis < 0)  millis = 0;
5408  struct timeval now;
5409  int status = gettimeofday(&now, NULL);
5410  assert(status == 0, "gettimeofday");
5411  jlong seconds = millis / 1000;
5412  jlong max_wait_period;
5413
5414  if (UseLWPSynchronization) {
5415    // forward port of fix for 4275818 (not sleeping long enough)
5416    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5417    // _lwp_cond_timedwait() used a round_down algorithm rather
5418    // than a round_up. For millis less than our roundfactor
5419    // it rounded down to 0 which doesn't meet the spec.
5420    // For millis > roundfactor we may return a bit sooner, but
5421    // since we can not accurately identify the patch level and
5422    // this has already been fixed in Solaris 9 and 8 we will
5423    // leave it alone rather than always rounding down.
5424
5425    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5426       // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5427           // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5428           max_wait_period = 21000000;
5429  } else {
5430    max_wait_period = 50000000;
5431  }
5432  millis %= 1000;
5433  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5434     seconds = max_wait_period;
5435  }
5436  abstime->tv_sec = now.tv_sec  + seconds;
5437  long       usec = now.tv_usec + millis * 1000;
5438  if (usec >= 1000000) {
5439    abstime->tv_sec += 1;
5440    usec -= 1000000;
5441  }
5442  abstime->tv_nsec = usec * 1000;
5443  return abstime;
5444}
5445
5446// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5447// Conceptually TryPark() should be equivalent to park(0).
5448
5449int os::PlatformEvent::TryPark() {
5450  for (;;) {
5451    const int v = _Event ;
5452    guarantee ((v == 0) || (v == 1), "invariant") ;
5453    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5454  }
5455}
5456
5457void os::PlatformEvent::park() {           // AKA: down()
5458  // Invariant: Only the thread associated with the Event/PlatformEvent
5459  // may call park().
5460  int v ;
5461  for (;;) {
5462      v = _Event ;
5463      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5464  }
5465  guarantee (v >= 0, "invariant") ;
5466  if (v == 0) {
5467     // Do this the hard way by blocking ...
5468     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5469     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5470     // Only for SPARC >= V8PlusA
5471#if defined(__sparc) && defined(COMPILER2)
5472     if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5473#endif
5474     int status = os::Solaris::mutex_lock(_mutex);
5475     assert_status(status == 0, status,  "mutex_lock");
5476     guarantee (_nParked == 0, "invariant") ;
5477     ++ _nParked ;
5478     while (_Event < 0) {
5479        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5480        // Treat this the same as if the wait was interrupted
5481        // With usr/lib/lwp going to kernel, always handle ETIME
5482        status = os::Solaris::cond_wait(_cond, _mutex);
5483        if (status == ETIME) status = EINTR ;
5484        assert_status(status == 0 || status == EINTR, status, "cond_wait");
5485     }
5486     -- _nParked ;
5487     _Event = 0 ;
5488     status = os::Solaris::mutex_unlock(_mutex);
5489     assert_status(status == 0, status, "mutex_unlock");
5490    // Paranoia to ensure our locked and lock-free paths interact
5491    // correctly with each other.
5492    OrderAccess::fence();
5493  }
5494}
5495
5496int os::PlatformEvent::park(jlong millis) {
5497  guarantee (_nParked == 0, "invariant") ;
5498  int v ;
5499  for (;;) {
5500      v = _Event ;
5501      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5502  }
5503  guarantee (v >= 0, "invariant") ;
5504  if (v != 0) return OS_OK ;
5505
5506  int ret = OS_TIMEOUT;
5507  timestruc_t abst;
5508  compute_abstime (&abst, millis);
5509
5510  // See http://monaco.sfbay/detail.jsf?cr=5094058.
5511  // For Solaris SPARC set fprs.FEF=0 prior to parking.
5512  // Only for SPARC >= V8PlusA
5513#if defined(__sparc) && defined(COMPILER2)
5514 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5515#endif
5516  int status = os::Solaris::mutex_lock(_mutex);
5517  assert_status(status == 0, status, "mutex_lock");
5518  guarantee (_nParked == 0, "invariant") ;
5519  ++ _nParked ;
5520  while (_Event < 0) {
5521     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5522     assert_status(status == 0 || status == EINTR ||
5523                   status == ETIME || status == ETIMEDOUT,
5524                   status, "cond_timedwait");
5525     if (!FilterSpuriousWakeups) break ;                // previous semantics
5526     if (status == ETIME || status == ETIMEDOUT) break ;
5527     // We consume and ignore EINTR and spurious wakeups.
5528  }
5529  -- _nParked ;
5530  if (_Event >= 0) ret = OS_OK ;
5531  _Event = 0 ;
5532  status = os::Solaris::mutex_unlock(_mutex);
5533  assert_status(status == 0, status, "mutex_unlock");
5534  // Paranoia to ensure our locked and lock-free paths interact
5535  // correctly with each other.
5536  OrderAccess::fence();
5537  return ret;
5538}
5539
5540void os::PlatformEvent::unpark() {
5541  // Transitions for _Event:
5542  //    0 :=> 1
5543  //    1 :=> 1
5544  //   -1 :=> either 0 or 1; must signal target thread
5545  //          That is, we can safely transition _Event from -1 to either
5546  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5547  //          unpark() calls.
5548  // See also: "Semaphores in Plan 9" by Mullender & Cox
5549  //
5550  // Note: Forcing a transition from "-1" to "1" on an unpark() means
5551  // that it will take two back-to-back park() calls for the owning
5552  // thread to block. This has the benefit of forcing a spurious return
5553  // from the first park() call after an unpark() call which will help
5554  // shake out uses of park() and unpark() without condition variables.
5555
5556  if (Atomic::xchg(1, &_Event) >= 0) return;
5557
5558  // If the thread associated with the event was parked, wake it.
5559  // Wait for the thread assoc with the PlatformEvent to vacate.
5560  int status = os::Solaris::mutex_lock(_mutex);
5561  assert_status(status == 0, status, "mutex_lock");
5562  int AnyWaiters = _nParked;
5563  status = os::Solaris::mutex_unlock(_mutex);
5564  assert_status(status == 0, status, "mutex_unlock");
5565  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5566  if (AnyWaiters != 0) {
5567    // We intentional signal *after* dropping the lock
5568    // to avoid a common class of futile wakeups.
5569    status = os::Solaris::cond_signal(_cond);
5570    assert_status(status == 0, status, "cond_signal");
5571  }
5572}
5573
5574// JSR166
5575// -------------------------------------------------------
5576
5577/*
5578 * The solaris and linux implementations of park/unpark are fairly
5579 * conservative for now, but can be improved. They currently use a
5580 * mutex/condvar pair, plus _counter.
5581 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5582 * sets count to 1 and signals condvar.  Only one thread ever waits
5583 * on the condvar. Contention seen when trying to park implies that someone
5584 * is unparking you, so don't wait. And spurious returns are fine, so there
5585 * is no need to track notifications.
5586 */
5587
5588#define MAX_SECS 100000000
5589/*
5590 * This code is common to linux and solaris and will be moved to a
5591 * common place in dolphin.
5592 *
5593 * The passed in time value is either a relative time in nanoseconds
5594 * or an absolute time in milliseconds. Either way it has to be unpacked
5595 * into suitable seconds and nanoseconds components and stored in the
5596 * given timespec structure.
5597 * Given time is a 64-bit value and the time_t used in the timespec is only
5598 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
5599 * overflow if times way in the future are given. Further on Solaris versions
5600 * prior to 10 there is a restriction (see cond_timedwait) that the specified
5601 * number of seconds, in abstime, is less than current_time  + 100,000,000.
5602 * As it will be 28 years before "now + 100000000" will overflow we can
5603 * ignore overflow and just impose a hard-limit on seconds using the value
5604 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5605 * years from "now".
5606 */
5607static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5608  assert (time > 0, "convertTime");
5609
5610  struct timeval now;
5611  int status = gettimeofday(&now, NULL);
5612  assert(status == 0, "gettimeofday");
5613
5614  time_t max_secs = now.tv_sec + MAX_SECS;
5615
5616  if (isAbsolute) {
5617    jlong secs = time / 1000;
5618    if (secs > max_secs) {
5619      absTime->tv_sec = max_secs;
5620    }
5621    else {
5622      absTime->tv_sec = secs;
5623    }
5624    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5625  }
5626  else {
5627    jlong secs = time / NANOSECS_PER_SEC;
5628    if (secs >= MAX_SECS) {
5629      absTime->tv_sec = max_secs;
5630      absTime->tv_nsec = 0;
5631    }
5632    else {
5633      absTime->tv_sec = now.tv_sec + secs;
5634      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5635      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5636        absTime->tv_nsec -= NANOSECS_PER_SEC;
5637        ++absTime->tv_sec; // note: this must be <= max_secs
5638      }
5639    }
5640  }
5641  assert(absTime->tv_sec >= 0, "tv_sec < 0");
5642  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5643  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5644  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5645}
5646
5647void Parker::park(bool isAbsolute, jlong time) {
5648  // Ideally we'd do something useful while spinning, such
5649  // as calling unpackTime().
5650
5651  // Optional fast-path check:
5652  // Return immediately if a permit is available.
5653  // We depend on Atomic::xchg() having full barrier semantics
5654  // since we are doing a lock-free update to _counter.
5655  if (Atomic::xchg(0, &_counter) > 0) return;
5656
5657  // Optional fast-exit: Check interrupt before trying to wait
5658  Thread* thread = Thread::current();
5659  assert(thread->is_Java_thread(), "Must be JavaThread");
5660  JavaThread *jt = (JavaThread *)thread;
5661  if (Thread::is_interrupted(thread, false)) {
5662    return;
5663  }
5664
5665  // First, demultiplex/decode time arguments
5666  timespec absTime;
5667  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5668    return;
5669  }
5670  if (time > 0) {
5671    // Warning: this code might be exposed to the old Solaris time
5672    // round-down bugs.  Grep "roundingFix" for details.
5673    unpackTime(&absTime, isAbsolute, time);
5674  }
5675
5676  // Enter safepoint region
5677  // Beware of deadlocks such as 6317397.
5678  // The per-thread Parker:: _mutex is a classic leaf-lock.
5679  // In particular a thread must never block on the Threads_lock while
5680  // holding the Parker:: mutex.  If safepoints are pending both the
5681  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5682  ThreadBlockInVM tbivm(jt);
5683
5684  // Don't wait if cannot get lock since interference arises from
5685  // unblocking.  Also. check interrupt before trying wait
5686  if (Thread::is_interrupted(thread, false) ||
5687      os::Solaris::mutex_trylock(_mutex) != 0) {
5688    return;
5689  }
5690
5691  int status ;
5692
5693  if (_counter > 0)  { // no wait needed
5694    _counter = 0;
5695    status = os::Solaris::mutex_unlock(_mutex);
5696    assert (status == 0, "invariant") ;
5697    // Paranoia to ensure our locked and lock-free paths interact
5698    // correctly with each other and Java-level accesses.
5699    OrderAccess::fence();
5700    return;
5701  }
5702
5703#ifdef ASSERT
5704  // Don't catch signals while blocked; let the running threads have the signals.
5705  // (This allows a debugger to break into the running thread.)
5706  sigset_t oldsigs;
5707  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5708  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5709#endif
5710
5711  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5712  jt->set_suspend_equivalent();
5713  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5714
5715  // Do this the hard way by blocking ...
5716  // See http://monaco.sfbay/detail.jsf?cr=5094058.
5717  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5718  // Only for SPARC >= V8PlusA
5719#if defined(__sparc) && defined(COMPILER2)
5720  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5721#endif
5722
5723  if (time == 0) {
5724    status = os::Solaris::cond_wait (_cond, _mutex) ;
5725  } else {
5726    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5727  }
5728  // Note that an untimed cond_wait() can sometimes return ETIME on older
5729  // versions of the Solaris.
5730  assert_status(status == 0 || status == EINTR ||
5731                status == ETIME || status == ETIMEDOUT,
5732                status, "cond_timedwait");
5733
5734#ifdef ASSERT
5735  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5736#endif
5737  _counter = 0 ;
5738  status = os::Solaris::mutex_unlock(_mutex);
5739  assert_status(status == 0, status, "mutex_unlock") ;
5740  // Paranoia to ensure our locked and lock-free paths interact
5741  // correctly with each other and Java-level accesses.
5742  OrderAccess::fence();
5743
5744  // If externally suspended while waiting, re-suspend
5745  if (jt->handle_special_suspend_equivalent_condition()) {
5746    jt->java_suspend_self();
5747  }
5748}
5749
5750void Parker::unpark() {
5751  int s, status ;
5752  status = os::Solaris::mutex_lock (_mutex) ;
5753  assert (status == 0, "invariant") ;
5754  s = _counter;
5755  _counter = 1;
5756  status = os::Solaris::mutex_unlock (_mutex) ;
5757  assert (status == 0, "invariant") ;
5758
5759  if (s < 1) {
5760    status = os::Solaris::cond_signal (_cond) ;
5761    assert (status == 0, "invariant") ;
5762  }
5763}
5764
5765extern char** environ;
5766
5767// Run the specified command in a separate process. Return its exit value,
5768// or -1 on failure (e.g. can't fork a new process).
5769// Unlike system(), this function can be called from signal handler. It
5770// doesn't block SIGINT et al.
5771int os::fork_and_exec(char* cmd) {
5772  char * argv[4];
5773  argv[0] = (char *)"sh";
5774  argv[1] = (char *)"-c";
5775  argv[2] = cmd;
5776  argv[3] = NULL;
5777
5778  // fork is async-safe, fork1 is not so can't use in signal handler
5779  pid_t pid;
5780  Thread* t = ThreadLocalStorage::get_thread_slow();
5781  if (t != NULL && t->is_inside_signal_handler()) {
5782    pid = fork();
5783  } else {
5784    pid = fork1();
5785  }
5786
5787  if (pid < 0) {
5788    // fork failed
5789    warning("fork failed: %s", strerror(errno));
5790    return -1;
5791
5792  } else if (pid == 0) {
5793    // child process
5794
5795    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5796    execve("/usr/bin/sh", argv, environ);
5797
5798    // execve failed
5799    _exit(-1);
5800
5801  } else  {
5802    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5803    // care about the actual exit code, for now.
5804
5805    int status;
5806
5807    // Wait for the child process to exit.  This returns immediately if
5808    // the child has already exited. */
5809    while (waitpid(pid, &status, 0) < 0) {
5810        switch (errno) {
5811        case ECHILD: return 0;
5812        case EINTR: break;
5813        default: return -1;
5814        }
5815    }
5816
5817    if (WIFEXITED(status)) {
5818       // The child exited normally; get its exit code.
5819       return WEXITSTATUS(status);
5820    } else if (WIFSIGNALED(status)) {
5821       // The child exited because of a signal
5822       // The best value to return is 0x80 + signal number,
5823       // because that is what all Unix shells do, and because
5824       // it allows callers to distinguish between process exit and
5825       // process death by signal.
5826       return 0x80 + WTERMSIG(status);
5827    } else {
5828       // Unknown exit code; pass it through
5829       return status;
5830    }
5831  }
5832}
5833
5834// is_headless_jre()
5835//
5836// Test for the existence of xawt/libmawt.so or libawt_xawt.so
5837// in order to report if we are running in a headless jre
5838//
5839// Since JDK8 xawt/libmawt.so was moved into the same directory
5840// as libawt.so, and renamed libawt_xawt.so
5841//
5842bool os::is_headless_jre() {
5843    struct stat statbuf;
5844    char buf[MAXPATHLEN];
5845    char libmawtpath[MAXPATHLEN];
5846    const char *xawtstr  = "/xawt/libmawt.so";
5847    const char *new_xawtstr = "/libawt_xawt.so";
5848    char *p;
5849
5850    // Get path to libjvm.so
5851    os::jvm_path(buf, sizeof(buf));
5852
5853    // Get rid of libjvm.so
5854    p = strrchr(buf, '/');
5855    if (p == NULL) return false;
5856    else *p = '\0';
5857
5858    // Get rid of client or server
5859    p = strrchr(buf, '/');
5860    if (p == NULL) return false;
5861    else *p = '\0';
5862
5863    // check xawt/libmawt.so
5864    strcpy(libmawtpath, buf);
5865    strcat(libmawtpath, xawtstr);
5866    if (::stat(libmawtpath, &statbuf) == 0) return false;
5867
5868    // check libawt_xawt.so
5869    strcpy(libmawtpath, buf);
5870    strcat(libmawtpath, new_xawtstr);
5871    if (::stat(libmawtpath, &statbuf) == 0) return false;
5872
5873    return true;
5874}
5875
5876size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5877  size_t res;
5878  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5879          "Assumed _thread_in_native");
5880  RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5881  return res;
5882}
5883
5884int os::close(int fd) {
5885  return ::close(fd);
5886}
5887
5888int os::socket_close(int fd) {
5889  return ::close(fd);
5890}
5891
5892int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5893  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5894          "Assumed _thread_in_native");
5895  RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5896}
5897
5898int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5899  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5900          "Assumed _thread_in_native");
5901  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5902}
5903
5904int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5905  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5906}
5907
5908// As both poll and select can be interrupted by signals, we have to be
5909// prepared to restart the system call after updating the timeout, unless
5910// a poll() is done with timeout == -1, in which case we repeat with this
5911// "wait forever" value.
5912
5913int os::timeout(int fd, long timeout) {
5914  int res;
5915  struct timeval t;
5916  julong prevtime, newtime;
5917  static const char* aNull = 0;
5918  struct pollfd pfd;
5919  pfd.fd = fd;
5920  pfd.events = POLLIN;
5921
5922  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5923          "Assumed _thread_in_native");
5924
5925  gettimeofday(&t, &aNull);
5926  prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
5927
5928  for(;;) {
5929    res = ::poll(&pfd, 1, timeout);
5930    if(res == OS_ERR && errno == EINTR) {
5931        if(timeout != -1) {
5932          gettimeofday(&t, &aNull);
5933          newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
5934          timeout -= newtime - prevtime;
5935          if(timeout <= 0)
5936            return OS_OK;
5937          prevtime = newtime;
5938        }
5939    } else return res;
5940  }
5941}
5942
5943int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5944  int _result;
5945  _result = ::connect(fd, him, len);
5946
5947  // On Solaris, when a connect() call is interrupted, the connection
5948  // can be established asynchronously (see 6343810). Subsequent calls
5949  // to connect() must check the errno value which has the semantic
5950  // described below (copied from the connect() man page). Handling
5951  // of asynchronously established connections is required for both
5952  // blocking and non-blocking sockets.
5953  //     EINTR            The  connection  attempt  was   interrupted
5954  //                      before  any data arrived by the delivery of
5955  //                      a signal. The connection, however, will  be
5956  //                      established asynchronously.
5957  //
5958  //     EINPROGRESS      The socket is non-blocking, and the connec-
5959  //                      tion  cannot  be completed immediately.
5960  //
5961  //     EALREADY         The socket is non-blocking,  and a previous
5962  //                      connection  attempt  has  not yet been com-
5963  //                      pleted.
5964  //
5965  //     EISCONN          The socket is already connected.
5966  if (_result == OS_ERR && errno == EINTR) {
5967     /* restarting a connect() changes its errno semantics */
5968     RESTARTABLE(::connect(fd, him, len), _result);
5969     /* undo these changes */
5970     if (_result == OS_ERR) {
5971       if (errno == EALREADY) {
5972         errno = EINPROGRESS; /* fall through */
5973       } else if (errno == EISCONN) {
5974         errno = 0;
5975         return OS_OK;
5976       }
5977     }
5978   }
5979   return _result;
5980 }
5981
5982int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5983  if (fd < 0) {
5984    return OS_ERR;
5985  }
5986  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5987          "Assumed _thread_in_native");
5988  RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
5989}
5990
5991int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
5992                 sockaddr* from, socklen_t* fromlen) {
5993  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5994          "Assumed _thread_in_native");
5995  RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
5996}
5997
5998int os::sendto(int fd, char* buf, size_t len, uint flags,
5999               struct sockaddr* to, socklen_t tolen) {
6000  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6001          "Assumed _thread_in_native");
6002  RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
6003}
6004
6005int os::socket_available(int fd, jint *pbytes) {
6006  if (fd < 0) {
6007    return OS_OK;
6008  }
6009  int ret;
6010  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6011  // note: ioctl can return 0 when successful, JVM_SocketAvailable
6012  // is expected to return 0 on failure and 1 on success to the jdk.
6013  return (ret == OS_ERR) ? 0 : 1;
6014}
6015
6016int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6017  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6018          "Assumed _thread_in_native");
6019   return ::bind(fd, him, len);
6020}
6021
6022// Get the default path to the core file
6023// Returns the length of the string
6024int os::get_core_path(char* buffer, size_t bufferSize) {
6025  const char* p = get_current_directory(buffer, bufferSize);
6026
6027  if (p == NULL) {
6028    assert(p != NULL, "failed to get current directory");
6029    return 0;
6030  }
6031
6032  return strlen(buffer);
6033}
6034
6035#ifndef PRODUCT
6036void TestReserveMemorySpecial_test() {
6037  // No tests available for this platform
6038}
6039#endif
6040