os_bsd_zero.cpp revision 3171:da4be62fb889
/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
#include <pthread.h>
#include <pthread_np.h> /* For pthread_attr_get_np */
#endif

// no precompiled headers
#include "assembler_zero.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
#include "nativeInst_zero.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "thread_bsd.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

address os::current_stack_pointer() {
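  // There is no portable way to read the stack pointer from C++, and
  // Zero keeps no platform register state to consult, so the address of
  // a local variable serves as a close approximation of the current sp.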
  address dummy = (address) &dummy;
  return dummy;
}

frame os::get_sender_for_C_frame(frame* fr) {
  ShouldNotCallThis();
}

frame os::current_frame() {
  // The only thing that calls this is the stack printing code in
  // VMError::report:
  //   - Step 110 (printing stack bounds) uses the sp in the frame
  //     to determine the amount of free space on the stack.  We
  //     set the sp to a close approximation of the real value in
  //     order to allow this step to complete.
  //   - Step 120 (printing native stack) tries to walk the stack.
  //     The frame we create has a NULL pc, which is ignored as an
  //     invalid frame.
  frame dummy = frame();
  dummy.set_sp((intptr_t *) current_stack_pointer());
  return dummy;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
#ifdef SPARC
  // On SPARC, 0 != %hi(any real address), because there is no
  // allocation in the first 1Kb of the virtual address space.
  return (char *) 0;
#else
  // This is the value for x86; works pretty well for PPC too.
  return (char *) -1;
#endif // SPARC
}

void os::initialize_thread() {
  // Nothing to do.
}

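// Zero executes no generated code, so a signal's ucontext never refers
// to a Java frame; these context accessors are therefore unused and
// simply abort (presumably why they are left unimplemented here).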
address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
  ShouldNotCallThis();
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp,
                                        intptr_t** ret_fp) {
  ShouldNotCallThis();
}

frame os::fetch_frame_from_context(void* ucVoid) {
  ShouldNotCallThis();
}

extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int sig,
                      siginfo_t* info,
                      void* ucVoid,
                      int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon for JNI code to use signal/sigset to
  // install and then restore certain signal handlers (e.g. to
  // temporarily block SIGPIPE, or to have a SIGILL handler when
  // detecting CPU type).  When that happens, JVM_handle_bsd_signal()
  // might be invoked with junk info/ucVoid.  To avoid an unnecessary
  // crash when libjsig is not preloaded, try to handle signals that
  // do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Bsd::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Bsd::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if (t->is_VM_thread()) {
        vmthread = (VMThread*)t;
      }
    }
  }

  if (info != NULL && thread != NULL) {
    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          ShouldNotCallThis();
        }
        else if (thread->in_stack_red_zone(addr)) {
          thread->disable_stack_red_zone();
          ShouldNotCallThis();
        }
#ifndef _ALLBSD_SOURCE
        else {
          // Accessing a stack address below sp may cause SEGV if the
          // current thread has a MAP_GROWSDOWN stack.  This should
          // only happen when the current thread was created by user
          // code with the MAP_GROWSDOWN flag and then attached to the
          // VM.  See notes in os_bsd.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Bsd::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          }
          else {
            fatal("recursive SEGV while expanding stack.");
          }
        }
#endif
      }
    }

    /*if (thread->thread_state() == _thread_in_Java) {
      ShouldNotCallThis();
    }
    else*/ if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      ShouldNotCallThis();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC
    // kicks in and the heap gets shrunk before the field access.
    /*if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }*/

    // Check to see if we caught the safepoint code in the process
    // of write protecting the memory serialization page.  It write
    // enables the page immediately after protecting it so we can
    // just return to retry the write.
    if ((sig == SIGSEGV || sig == SIGBUS) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // signal-chaining
  if (os::Bsd::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it one
    return false;
  }

#ifndef PRODUCT
  if (sig == SIGSEGV) {
    fatal("\n#"
          "\n#    /--------------------\\"
          "\n#    | segmentation fault |"
          "\n#    \\---\\ /--------------/"
          "\n#        /"
          "\n#    [-]        |\\_/|    "
          "\n#    (+)=C      |o o|__  "
          "\n#    | |        =-*-=__\\ "
          "\n#    OOO        c_c_(___)");
  }
#endif // !PRODUCT

  const char *fmt =
      "caught unhandled signal " INT32_FORMAT " at address " PTR_FORMAT;
  char buf[128];

  // snprintf rather than sprintf, to guard against overflowing buf
  snprintf(buf, sizeof(buf), fmt, sig, info->si_addr);
  fatal(buf);
}

void os::Bsd::init_thread_fpu_state(void) {
  // Nothing to do
}

#ifndef _ALLBSD_SOURCE
int os::Bsd::get_fpu_control_word() {
  ShouldNotCallThis();
}

void os::Bsd::set_fpu_control_word(int fpu) {
  ShouldNotCallThis();
}
#endif

bool os::is_allocatable(size_t bytes) {
#ifdef _LP64
  return true;
#else
  if (bytes < 2 * G) {
    return true;
  }

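  // Probe by reserving (and immediately releasing) the requested amount:
  // the request is allocatable exactly when the reservation succeeds.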
  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // _LP64
}

///////////////////////////////////////////////////////////////////////////////
// thread stack

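// The smallest stack size the VM will tolerate for a thread; how it is
// enforced lives in os_bsd.cpp (presumably by rounding small requests up
// or rejecting them at thread creation).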
size_t os::Bsd::min_stack_allowed = 64 * K;

bool os::Bsd::supports_variable_stack_size() {
  return true;
}

size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
#ifdef _LP64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // _LP64
  return s;
}

size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
  // Only enable glibc guard pages for non-Java threads
  // (Java threads have HotSpot guard pages)
  return (thr_type == java_thread ? 0 : page_size());
}

static void current_stack_region(address *bottom, size_t *size) {
  address stack_bottom;
  address stack_top;
  size_t stack_bytes;

#ifdef __APPLE__
  pthread_t self = pthread_self();
  stack_top = (address) pthread_get_stackaddr_np(self);
  stack_bytes = pthread_get_stacksize_np(self);
  stack_bottom = stack_top - stack_bytes;
#elif defined(__OpenBSD__)
  stack_t ss;
  int rslt = pthread_stackseg_np(pthread_self(), &ss);

  if (rslt != 0)
    fatal(err_msg("pthread_stackseg_np failed with err = " INT32_FORMAT,
          rslt));

  stack_top = (address) ss.ss_sp;
  stack_bytes  = ss.ss_size;
  stack_bottom = stack_top - stack_bytes;
#elif defined(_ALLBSD_SOURCE)
  pthread_attr_t attr;

  int rslt = pthread_attr_init(&attr);

  // The JVM needs to know the exact stack location; abort if we can't get it
  if (rslt != 0)
    fatal(err_msg("pthread_attr_init failed with err = " INT32_FORMAT, rslt));

  rslt = pthread_attr_get_np(pthread_self(), &attr);

  if (rslt != 0)
    fatal(err_msg("pthread_attr_get_np failed with err = " INT32_FORMAT,
          rslt));

  if (pthread_attr_getstackaddr(&attr, (void **) &stack_bottom) != 0 ||
      pthread_attr_getstacksize(&attr, &stack_bytes) != 0) {
    fatal("Can not locate current stack attributes!");
  }

  pthread_attr_destroy(&attr);

  stack_top = stack_bottom + stack_bytes;
#else /* Linux */
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
    }
    else {
      fatal(err_msg("pthread_getattr_np failed with errno = " INT32_FORMAT,
            res));
    }
  }

  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getstack failed with errno = " INT32_FORMAT,
          res));
  }
  stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Bsd::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal(err_msg(
        "pthread_attr_getguardsize failed with errno = " INT32_FORMAT, res));
  }
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be, given what is currently mapped.  This can be huge, so we
  // cap it.
  if (os::Bsd::is_initial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }
#endif

  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return bottom + size;
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream* st, void* context) {
  ShouldNotCallThis();
}

void os::print_register_info(outputStream *st, void *context) {
  ShouldNotCallThis();
}

/////////////////////////////////////////////////////////////////////////////
// Stubs for things that would be in bsd_zero.s if it existed.
// You probably want to disassemble these monkeys to check they're ok.

extern "C" {
  int SpinPause() {
    // Zero has no pause instruction to execute here; return a dummy
    // value so this value-returning function doesn't fall off the end
    // (which is undefined behaviour).
    return 0;
  }

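  // Note: unlike the hand-written assembly versions on other platforms,
  // these SafeFetch stubs are not protected against faulting loads; the
  // errValue assignment is immediately overwritten, so a load from a bad
  // address will still crash rather than return errValue.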
  int SafeFetch32(int *adr, int errValue) {
    int value = errValue;
    value = *adr;
    return value;
  }
  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
    intptr_t value = errValue;
    value = *adr;
    return value;
  }

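  // Conjoint (possibly overlapping) element-wise copies.  The copy runs
  // forwards when the destination starts below the source and backwards
  // otherwise, so overlapping regions are handled correctly; copying one
  // element at a time keeps each individual element access atomic.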
  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    if (from > to) {
      jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    if (from > to) {
      jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
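  // jlong copies go through os::atomic_copy64(), since plain 64-bit
  // loads and stores are not guaranteed to be atomic on 32-bit targets.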
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }

  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }
  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }
  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};

/////////////////////////////////////////////////////////////////////////////
// Implementations of atomic operations not supported by processors.
//  -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html

#ifndef _LP64
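// On 32-bit targets without a native 8-byte compare-and-swap, GCC emits
// calls to an out-of-line __sync_val_compare_and_swap_8.  The VM is not
// expected to perform 64-bit CAS on such platforms, so this stub aborts
// if it is ever reached.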
extern "C" {
  long long unsigned int __sync_val_compare_and_swap_8(
    volatile void *ptr,
    long long unsigned int oldval,
    long long unsigned int newval) {
    ShouldNotCallThis();
  }
};
#endif // !_LP64

#ifndef PRODUCT
void os::verify_stack_alignment() {
}
#endif