// os_linux_zero.cpp revision 4558:746b070f5022
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
23214501Srpaulo * 24214501Srpaulo */ 25214501Srpaulo 26214501Srpaulo// no precompiled headers 27214501Srpaulo#include "assembler_zero.inline.hpp" 28214501Srpaulo#include "classfile/classLoader.hpp" 29214501Srpaulo#include "classfile/systemDictionary.hpp" 30214501Srpaulo#include "classfile/vmSymbols.hpp" 31214501Srpaulo#include "code/icBuffer.hpp" 32214501Srpaulo#include "code/vtableStubs.hpp" 33214501Srpaulo#include "interpreter/interpreter.hpp" 34214501Srpaulo#include "jvm_linux.h" 35214501Srpaulo#include "memory/allocation.inline.hpp" 36214501Srpaulo#include "mutex_linux.inline.hpp" 37214501Srpaulo#include "nativeInst_zero.hpp" 38214501Srpaulo#include "os_share_linux.hpp" 39214501Srpaulo#include "prims/jniFastGetField.hpp" 40214501Srpaulo#include "prims/jvm.h" 41214501Srpaulo#include "prims/jvm_misc.hpp" 42214501Srpaulo#include "runtime/arguments.hpp" 43214501Srpaulo#include "runtime/extendedPC.hpp" 44214501Srpaulo#include "runtime/frame.inline.hpp" 45214501Srpaulo#include "runtime/interfaceSupport.hpp" 46214501Srpaulo#include "runtime/java.hpp" 47214501Srpaulo#include "runtime/javaCalls.hpp" 48214501Srpaulo#include "runtime/mutexLocker.hpp" 49214501Srpaulo#include "runtime/osThread.hpp" 50214501Srpaulo#include "runtime/sharedRuntime.hpp" 51214501Srpaulo#include "runtime/stubRoutines.hpp" 52214501Srpaulo#include "runtime/thread.inline.hpp" 53214501Srpaulo#include "runtime/timer.hpp" 54214501Srpaulo#include "utilities/events.hpp" 55214501Srpaulo#include "utilities/vmError.hpp" 56214501Srpaulo 57214501Srpauloaddress os::current_stack_pointer() { 58214501Srpaulo address dummy = (address) &dummy; 59214501Srpaulo return dummy; 60214501Srpaulo} 61214501Srpaulo 62214501Srpauloframe os::get_sender_for_C_frame(frame* fr) { 63214501Srpaulo ShouldNotCallThis(); 64214501Srpaulo} 65214501Srpaulo 66214501Srpauloframe os::current_frame() { 67214501Srpaulo // The only thing that calls this is the stack printing code in 68214501Srpaulo // VMError::report: 69214501Srpaulo // - 
Step 110 (printing stack bounds) uses the sp in the frame 70214501Srpaulo // to determine the amount of free space on the stack. We 71214501Srpaulo // set the sp to a close approximation of the real value in 72214501Srpaulo // order to allow this step to complete. 73214501Srpaulo // - Step 120 (printing native stack) tries to walk the stack. 74214501Srpaulo // The frame we create has a NULL pc, which is ignored as an 75214501Srpaulo // invalid frame. 76214501Srpaulo frame dummy = frame(); 77214501Srpaulo dummy.set_sp((intptr_t *) current_stack_pointer()); 78214501Srpaulo return dummy; 79214501Srpaulo} 80214501Srpaulo 81214501Srpaulochar* os::non_memory_address_word() { 82214501Srpaulo // Must never look like an address returned by reserve_memory, 83214501Srpaulo // even in its subfields (as defined by the CPU immediate fields, 84214501Srpaulo // if the CPU splits constants across multiple instructions). 85214501Srpaulo#ifdef SPARC 86214501Srpaulo // On SPARC, 0 != %hi(any real address), because there is no 87214501Srpaulo // allocation in the first 1Kb of the virtual address space. 88214501Srpaulo return (char *) 0; 89214501Srpaulo#else 90214501Srpaulo // This is the value for x86; works pretty well for PPC too. 91214501Srpaulo return (char *) -1; 92214501Srpaulo#endif // SPARC 93214501Srpaulo} 94214501Srpaulo 95214501Srpaulovoid os::initialize_thread(Thread * thr){ 96214501Srpaulo // Nothing to do. 
97214501Srpaulo} 98214501Srpaulo 99214501Srpauloaddress os::Linux::ucontext_get_pc(ucontext_t* uc) { 100214501Srpaulo ShouldNotCallThis(); 101214501Srpaulo} 102214501Srpaulo 103214501SrpauloExtendedPC os::fetch_frame_from_context(void* ucVoid, 104214501Srpaulo intptr_t** ret_sp, 105214501Srpaulo intptr_t** ret_fp) { 106214501Srpaulo ShouldNotCallThis(); 107214501Srpaulo} 108214501Srpaulo 109214501Srpauloframe os::fetch_frame_from_context(void* ucVoid) { 110214501Srpaulo ShouldNotCallThis(); 111214501Srpaulo} 112214501Srpaulo 113214501Srpauloextern "C" JNIEXPORT int 114214501SrpauloJVM_handle_linux_signal(int sig, 115214501Srpaulo siginfo_t* info, 116214501Srpaulo void* ucVoid, 117214501Srpaulo int abort_if_unrecognized) { 118214501Srpaulo ucontext_t* uc = (ucontext_t*) ucVoid; 119214501Srpaulo 120214501Srpaulo Thread* t = ThreadLocalStorage::get_thread_slow(); 121214501Srpaulo 122214501Srpaulo SignalHandlerMark shm(t); 123214501Srpaulo 124252726Srpaulo // Note: it's not uncommon that JNI code uses signal/sigset to 125214501Srpaulo // install then restore certain signal handler (e.g. to temporarily 126214501Srpaulo // block SIGPIPE, or have a SIGILL handler when detecting CPU 127214501Srpaulo // type). When that happens, JVM_handle_linux_signal() might be 128214501Srpaulo // invoked with junk info/ucVoid. To avoid unnecessary crash when 129214501Srpaulo // libjsig is not preloaded, try handle signals that do not require 130214501Srpaulo // siginfo/ucontext first. 
131214501Srpaulo 132214501Srpaulo if (sig == SIGPIPE || sig == SIGXFSZ) { 133214501Srpaulo // allow chained handler to go first 134214501Srpaulo if (os::Linux::chained_handler(sig, info, ucVoid)) { 135214501Srpaulo return true; 136214501Srpaulo } else { 137214501Srpaulo if (PrintMiscellaneous && (WizardMode || Verbose)) { 138214501Srpaulo char buf[64]; 139214501Srpaulo warning("Ignoring %s - see bugs 4229104 or 646499219", 140214501Srpaulo os::exception_name(sig, buf, sizeof(buf))); 141214501Srpaulo } 142214501Srpaulo return true; 143214501Srpaulo } 144214501Srpaulo } 145252726Srpaulo 146252726Srpaulo JavaThread* thread = NULL; 147214501Srpaulo VMThread* vmthread = NULL; 148214501Srpaulo if (os::Linux::signal_handlers_are_installed) { 149214501Srpaulo if (t != NULL ){ 150252726Srpaulo if(t->is_Java_thread()) { 151214501Srpaulo thread = (JavaThread*)t; 152214501Srpaulo } 153214501Srpaulo else if(t->is_VM_thread()){ 154252726Srpaulo vmthread = (VMThread *)t; 155214501Srpaulo } 156214501Srpaulo } 157252726Srpaulo } 158252726Srpaulo 159252726Srpaulo if (info != NULL && thread != NULL) { 160214501Srpaulo // Handle ALL stack overflow variations here 161252726Srpaulo if (sig == SIGSEGV) { 162214501Srpaulo address addr = (address) info->si_addr; 163214501Srpaulo 164214501Srpaulo // check if fault address is within thread stack 165214501Srpaulo if (addr < thread->stack_base() && 166252726Srpaulo addr >= thread->stack_base() - thread->stack_size()) { 167214501Srpaulo // stack overflow 168214501Srpaulo if (thread->in_stack_yellow_zone(addr)) { 169252726Srpaulo thread->disable_stack_yellow_zone(); 170214501Srpaulo ShouldNotCallThis(); 171214501Srpaulo } 172214501Srpaulo else if (thread->in_stack_red_zone(addr)) { 173214501Srpaulo thread->disable_stack_red_zone(); 174214501Srpaulo ShouldNotCallThis(); 175214501Srpaulo } 176214501Srpaulo else { 177214501Srpaulo // Accessing stack address below sp may cause SEGV if 178214501Srpaulo // current thread has MAP_GROWSDOWN stack. 
This should 179214501Srpaulo // only happen when current thread was created by user 180214501Srpaulo // code with MAP_GROWSDOWN flag and then attached to VM. 181214501Srpaulo // See notes in os_linux.cpp. 182214501Srpaulo if (thread->osthread()->expanding_stack() == 0) { 183214501Srpaulo thread->osthread()->set_expanding_stack(); 184214501Srpaulo if (os::Linux::manually_expand_stack(thread, addr)) { 185252726Srpaulo thread->osthread()->clear_expanding_stack(); 186214501Srpaulo return true; 187214501Srpaulo } 188214501Srpaulo thread->osthread()->clear_expanding_stack(); 189214501Srpaulo } 190214501Srpaulo else { 191214501Srpaulo fatal("recursive segv. expanding stack."); 192214501Srpaulo } 193214501Srpaulo } 194214501Srpaulo } 195214501Srpaulo } 196214501Srpaulo 197214501Srpaulo /*if (thread->thread_state() == _thread_in_Java) { 198214501Srpaulo ShouldNotCallThis(); 199214501Srpaulo } 200214501Srpaulo else*/ if (thread->thread_state() == _thread_in_vm && 201214501Srpaulo sig == SIGBUS && thread->doing_unsafe_access()) { 202214501Srpaulo ShouldNotCallThis(); 203252726Srpaulo } 204214501Srpaulo 205214501Srpaulo // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC 206214501Srpaulo // kicks in and the heap gets shrunk before the field access. 207214501Srpaulo /*if (sig == SIGSEGV || sig == SIGBUS) { 208214501Srpaulo address addr = JNI_FastGetField::find_slowcase_pc(pc); 209214501Srpaulo if (addr != (address)-1) { 210252726Srpaulo stub = addr; 211214501Srpaulo } 212214501Srpaulo }*/ 213214501Srpaulo 214214501Srpaulo // Check to see if we caught the safepoint code in the process 215214501Srpaulo // of write protecting the memory serialization page. It write 216214501Srpaulo // enables the page immediately after protecting it so we can 217214501Srpaulo // just return to retry the write. 
218214501Srpaulo if (sig == SIGSEGV && 219214501Srpaulo os::is_memory_serialize_page(thread, (address) info->si_addr)) { 220214501Srpaulo // Block current thread until permission is restored. 221214501Srpaulo os::block_on_serialize_page_trap(); 222214501Srpaulo return true; 223214501Srpaulo } 224214501Srpaulo } 225214501Srpaulo 226214501Srpaulo // signal-chaining 227 if (os::Linux::chained_handler(sig, info, ucVoid)) { 228 return true; 229 } 230 231 if (!abort_if_unrecognized) { 232 // caller wants another chance, so give it to him 233 return false; 234 } 235 236#ifndef PRODUCT 237 if (sig == SIGSEGV) { 238 fatal("\n#" 239 "\n# /--------------------\\" 240 "\n# | segmentation fault |" 241 "\n# \\---\\ /--------------/" 242 "\n# /" 243 "\n# [-] |\\_/| " 244 "\n# (+)=C |o o|__ " 245 "\n# | | =-*-=__\\ " 246 "\n# OOO c_c_(___)"); 247 } 248#endif // !PRODUCT 249 250 const char *fmt = "caught unhandled signal %d"; 251 char buf[64]; 252 253 sprintf(buf, fmt, sig); 254 fatal(buf); 255} 256 257void os::Linux::init_thread_fpu_state(void) { 258 // Nothing to do 259} 260 261int os::Linux::get_fpu_control_word() { 262 ShouldNotCallThis(); 263} 264 265void os::Linux::set_fpu_control_word(int fpu) { 266 ShouldNotCallThis(); 267} 268 269bool os::is_allocatable(size_t bytes) { 270#ifdef _LP64 271 return true; 272#else 273 if (bytes < 2 * G) { 274 return true; 275 } 276 277 char* addr = reserve_memory(bytes, NULL); 278 279 if (addr != NULL) { 280 release_memory(addr, bytes); 281 } 282 283 return addr != NULL; 284#endif // _LP64 285} 286 287/////////////////////////////////////////////////////////////////////////////// 288// thread stack 289 290size_t os::Linux::min_stack_allowed = 64 * K; 291 292bool os::Linux::supports_variable_stack_size() { 293 return true; 294} 295 296size_t os::Linux::default_stack_size(os::ThreadType thr_type) { 297#ifdef _LP64 298 size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); 299#else 300 size_t s = (thr_type == os::compiler_thread ? 
2 * M : 512 * K); 301#endif // _LP64 302 return s; 303} 304 305size_t os::Linux::default_guard_size(os::ThreadType thr_type) { 306 // Only enable glibc guard pages for non-Java threads 307 // (Java threads have HotSpot guard pages) 308 return (thr_type == java_thread ? 0 : page_size()); 309} 310 311static void current_stack_region(address *bottom, size_t *size) { 312 pthread_attr_t attr; 313 int res = pthread_getattr_np(pthread_self(), &attr); 314 if (res != 0) { 315 if (res == ENOMEM) { 316 vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); 317 } 318 else { 319 fatal(err_msg("pthread_getattr_np failed with errno = %d", res)); 320 } 321 } 322 323 address stack_bottom; 324 size_t stack_bytes; 325 res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes); 326 if (res != 0) { 327 fatal(err_msg("pthread_attr_getstack failed with errno = %d", res)); 328 } 329 address stack_top = stack_bottom + stack_bytes; 330 331 // The block of memory returned by pthread_attr_getstack() includes 332 // guard pages where present. We need to trim these off. 333 size_t page_bytes = os::Linux::page_size(); 334 assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack"); 335 336 size_t guard_bytes; 337 res = pthread_attr_getguardsize(&attr, &guard_bytes); 338 if (res != 0) { 339 fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res)); 340 } 341 int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes; 342 assert(guard_bytes == guard_pages * page_bytes, "unaligned guard"); 343 344#ifdef IA64 345 // IA64 has two stacks sharing the same area of memory, a normal 346 // stack growing downwards and a register stack growing upwards. 347 // Guard pages, if present, are in the centre. This code splits 348 // the stack in two even without guard pages, though in theory 349 // there's nothing to stop us allocating more to the normal stack 350 // or more to the register stack if one or the other were found 351 // to grow faster. 
352 int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes; 353 stack_bottom += (total_pages - guard_pages) / 2 * page_bytes; 354#endif // IA64 355 356 stack_bottom += guard_bytes; 357 358 pthread_attr_destroy(&attr); 359 360 // The initial thread has a growable stack, and the size reported 361 // by pthread_attr_getstack is the maximum size it could possibly 362 // be given what currently mapped. This can be huge, so we cap it. 363 if (os::Linux::is_initial_thread()) { 364 stack_bytes = stack_top - stack_bottom; 365 366 if (stack_bytes > JavaThread::stack_size_at_create()) 367 stack_bytes = JavaThread::stack_size_at_create(); 368 369 stack_bottom = stack_top - stack_bytes; 370 } 371 372 assert(os::current_stack_pointer() >= stack_bottom, "should do"); 373 assert(os::current_stack_pointer() < stack_top, "should do"); 374 375 *bottom = stack_bottom; 376 *size = stack_top - stack_bottom; 377} 378 379address os::current_stack_base() { 380 address bottom; 381 size_t size; 382 current_stack_region(&bottom, &size); 383 return bottom + size; 384} 385 386size_t os::current_stack_size() { 387 // stack size includes normal stack and HotSpot guard pages 388 address bottom; 389 size_t size; 390 current_stack_region(&bottom, &size); 391 return size; 392} 393 394///////////////////////////////////////////////////////////////////////////// 395// helper functions for fatal error handler 396 397void os::print_context(outputStream* st, void* context) { 398 ShouldNotCallThis(); 399} 400 401void os::print_register_info(outputStream *st, void *context) { 402 ShouldNotCallThis(); 403} 404 405///////////////////////////////////////////////////////////////////////////// 406// Stubs for things that would be in linux_zero.s if it existed. 407// You probably want to disassemble these monkeys to check they're ok. 
408 409extern "C" { 410 int SpinPause() { 411 } 412 413 int SafeFetch32(int *adr, int errValue) { 414 int value = errValue; 415 value = *adr; 416 return value; 417 } 418 intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) { 419 intptr_t value = errValue; 420 value = *adr; 421 return value; 422 } 423 424 void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { 425 if (from > to) { 426 jshort *end = from + count; 427 while (from < end) 428 *(to++) = *(from++); 429 } 430 else if (from < to) { 431 jshort *end = from; 432 from += count - 1; 433 to += count - 1; 434 while (from >= end) 435 *(to--) = *(from--); 436 } 437 } 438 void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) { 439 if (from > to) { 440 jint *end = from + count; 441 while (from < end) 442 *(to++) = *(from++); 443 } 444 else if (from < to) { 445 jint *end = from; 446 from += count - 1; 447 to += count - 1; 448 while (from >= end) 449 *(to--) = *(from--); 450 } 451 } 452 void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { 453 if (from > to) { 454 jlong *end = from + count; 455 while (from < end) 456 os::atomic_copy64(from++, to++); 457 } 458 else if (from < to) { 459 jlong *end = from; 460 from += count - 1; 461 to += count - 1; 462 while (from >= end) 463 os::atomic_copy64(from--, to--); 464 } 465 } 466 467 void _Copy_arrayof_conjoint_bytes(HeapWord* from, 468 HeapWord* to, 469 size_t count) { 470 memmove(to, from, count); 471 } 472 void _Copy_arrayof_conjoint_jshorts(HeapWord* from, 473 HeapWord* to, 474 size_t count) { 475 memmove(to, from, count * 2); 476 } 477 void _Copy_arrayof_conjoint_jints(HeapWord* from, 478 HeapWord* to, 479 size_t count) { 480 memmove(to, from, count * 4); 481 } 482 void _Copy_arrayof_conjoint_jlongs(HeapWord* from, 483 HeapWord* to, 484 size_t count) { 485 memmove(to, from, count * 8); 486 } 487}; 488 489///////////////////////////////////////////////////////////////////////////// 490// Implementations of atomic 
operations not supported by processors. 491// -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html 492 493#ifndef _LP64 494extern "C" { 495 long long unsigned int __sync_val_compare_and_swap_8( 496 volatile void *ptr, 497 long long unsigned int oldval, 498 long long unsigned int newval) { 499 ShouldNotCallThis(); 500 } 501}; 502#endif // !_LP64 503 504#ifndef PRODUCT 505void os::verify_stack_alignment() { 506} 507#endif 508