vm_version_ppc.cpp revision 9801:80f8be586fae
1/* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * Copyright 2012, 2015 SAP AG. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"

# include <sys/sysinfo.h>

// Bit mask of the CPU features detected by determine_features().
int VM_Version::_features = VM_Version::unknown_m;
const char* VM_Version::_features_str = "";
// True only while the feature-detection stub executes. The signal handler
// checks this flag and patches unsupported (SIGILL-raising) instructions
// to 0 instead of terminating the VM.
bool VM_Version::_is_determine_features_test_running = false;


// Warn that a trap-based check flag will be disabled because it requires UseSIGTRAP.
#define MSG(flag) \
  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
      jio_fprintf(defaultStream::error_stream(),                       \
                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
                  "         -XX:+" #flag " will be disabled!\n");

// Probe CPU features, derive the ergonomic defaults for PPC64-specific and
// shared VM flags, and build the feature string.
void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_tcheck() && VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }

  // Sanity-check the (possibly user-set) architecture level against the
  // detected features. The cases deliberately fall through: requesting e.g.
  // level 7 also requires all level 6 and 5 instructions to be present.
  bool PowerArchitecturePPC64_ok = false;
  switch (PowerArchitecturePPC64) {
    case 8: if (!VM_Version::has_tcheck()  ) break;
            if (!VM_Version::has_lqarx()   ) break;
    case 7: if (!VM_Version::has_popcntw() ) break;
    case 6: if (!VM_Version::has_cmpb()    ) break;
    case 5: if (!VM_Version::has_popcntb() ) break;
    case 0: PowerArchitecturePPC64_ok = true; break;
    default: break;
  }
  guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
            UINTX_FORMAT " on this machine", PowerArchitecturePPC64);

  // Power 8: Configure Data Stream Control Register.
  if (PowerArchitecturePPC64 >= 8) {
    config_dscr();
  }

  // Trap-based checks are implemented via SIGTRAP; disable them (with a
  // warning if they were requested explicitly) when SIGTRAP is unavailable.
  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
  }

#ifdef COMPILER2
  if (!UseSIGTRAP) {
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

  // On Power6 test for section size.
  if (PowerArchitecturePPC64 == 6) {
    determine_section_size();
  // TODO: PPC port } else {
  // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
  }

  MaxVectorSize = 8;
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               //(has_mftgpr()? " mftgpr"  : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " vcipher" : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_tcheck()  ? " tcheck"  : "")
               // Make sure number of %s matches num_features!
              );
  _features_str = os::strdup(buf);
  if (Verbose) {
    print_features();
  }

  // PPC64 supports 8-byte compare-exchange operations (see
  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  // Used by C1.
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  UseSSE = 0; // Only on x86 and x64

  // Cache line size as measured by determine_features() via dcbz.
  intx cache_line_size = L1_data_cache_line_size();

  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // Default is not defined?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // Default is not defined?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value.
  }

  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
  }

  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");

  // Implementation does not use any of the vector instructions
  // available with Power8. Their exploitation is still pending.
  if (!UseCRC32Intrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support.
  if (UseAES) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }
  if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }
  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
    warning("SHA intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  // Adjust RTM (Restricted Transactional Memory) flags.
  if (UseRTMLocking) {
    // If CPU or OS are too old:
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    if (!has_tcheck()) {
      vm_exit_during_initialization("RTM instructions are not available on this CPU");
    }
    bool os_too_old = true;
#ifdef AIX
    if (os::Aix::os_version() >= 0x0701031e) { // at least AIX 7.1.3.30
      os_too_old = false;
    }
#endif
#ifdef linux
    // TODO: check kernel version (we currently have too old versions only)
#endif
    if (os_too_old) {
      vm_exit_during_initialization("RTM is not supported on this OS version.");
    }
  }

  if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
    if (!UnlockExperimentalVMOptions) {
      vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
                                    "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
    } else {
      warning("UseRTMLocking is only available as experimental option on this platform.");
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
    guarantee(RTMSpinLoopCount > 0, "unsupported");
#else
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
#endif
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
}

// Decide whether biased locking may be used; forces it off when RTM locking
// is enabled (the two are mutually exclusive).
bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

// Print the feature string and the measured L1 data cache line size.
void VM_Version::print_features() {
  tty->print_cr("Version: %s L1_data_cache_line_size=%d", cpu_features(), L1_data_cache_line_size());
}

#ifdef COMPILER2
// Determine section size on power6: If section size is 8 instructions,
// there should be a difference between the two testloops of ~15 %. If
// no difference is detected the section is assumed to be 32 instructions.
void VM_Version::determine_section_size() {

  int unroll = 80;

  // Buffer large enough for both unrolled test loops plus prologue/epilogue.
  const int code_size = (2* unroll * 32 + 100)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // Emit code.
  void (*test1)() = (void(*)())(void *)a->function_entry();

  Label l1;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1: instruction mix laid out so that each 8-instruction dispatch
    // sector is terminated explicitly (endgroup/brnop0).
    // ------- sector 0 ------------
    // ;; 0
    a->nop();                   // 1
    a->fpnop0();                // 2
    a->fpnop1();                // 3
    a->addi(R4,R4, -1); // 4

    // ;; 1
    a->nop();                   // 5
    a->fmr(F6, F6);             // 6
    a->fmr(F7, F7);             // 7
    a->endgroup();              // 8
    // ------- sector 8 ------------

    // ;; 2
    a->nop();                   // 9
    a->nop();                   // 10
    a->fmr(F8, F8);             // 11
    a->fmr(F9, F9);             // 12

    // ;; 3
    a->nop();                   // 13
    a->fmr(F10, F10);           // 14
    a->fmr(F11, F11);           // 15
    a->endgroup();              // 16
    // -------- sector 16 -------------

    // ;; 4
    a->nop();                   // 17
    a->nop();                   // 18
    a->fmr(F15, F15);           // 19
    a->fmr(F16, F16);           // 20

    // ;; 5
    a->nop();                   // 21
    a->fmr(F17, F17);           // 22
    a->fmr(F18, F18);           // 23
    a->endgroup();              // 24
    // ------- sector 24 ------------

    // ;; 6
    a->nop();                   // 25
    a->nop();                   // 26
    a->fmr(F19, F19);           // 27
    a->fmr(F20, F20);           // 28

    // ;; 7
    a->nop();                   // 29
    a->fmr(F21, F21);           // 30
    a->fmr(F22, F22);           // 31
    a->brnop0();                // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll);   // 33
  a->bge(CCR0, l1);             // 34
  a->blr();

  // Emit code.
  void (*test2)() = (void(*)())(void *)a->function_entry();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2: same work as loop 1 but with sector boundaries deliberately
    // misaligned; on an 8-instruction-sector machine this runs measurably slower.
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();                  // 1
    a->nop();                     // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();                  // 3
    a->fpnop1();                  // 4
    a->addi(R4,R4, -1);           // 5

    // ;; 1

    a->nop();                     // 6
    a->fmr(F6, F6);               // 7
    a->fmr(F7, F7);               // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();                // 9

    // ;; 3
    a->nop();                     // 10
    a->nop();                     // 11
    a->fmr(F8, F8);               // 12

    // ;; 4
    a->fmr(F9, F9);               // 13
    a->nop();                     // 14
    a->fmr(F10, F10);             // 15

    // ;; 5
    a->fmr(F11, F11);             // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();                // 17

    // ;; 7
    a->nop();                     // 18
    a->nop();                     // 19
    a->fmr(F15, F15);             // 20

    // ;; 8
    a->fmr(F16, F16);             // 21
    a->nop();                     // 22
    a->fmr(F17, F17);             // 23

    // ;; 9
    a->fmr(F18, F18);             // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();                // 25

    // ;; 11
    a->nop();                     // 26
    a->nop();                     // 27
    a->fmr(F19, F19);             // 28

    // ;; 12
    a->fmr(F20, F20);             // 29
    a->nop();                     // 30
    a->fmr(F21, F21);             // 31

    // ;; 13
    a->fmr(F22, F22);             // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll);     // 33
  a->bge(CCR0, l2);               // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  double loop1_seconds,loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  // Time both loops with per-thread CPU time and compute the relative difference.
  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1- start1) / (1000 *1000 *1000.0);


  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);

  loop2_seconds = (stop1 - start1) / (1000 *1000 *1000.0);

  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds *100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else{
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
#endif // COMPILER2

// Emit a stub containing one instance of each optional instruction, execute
// it (the signal handler nulls out instructions that raise SIGILL), and
// derive the feature mask from which instruction words survived.
// Also measures the L1 data cache line size via dcbz.
void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
  // 1 InstWord per call for the blr instruction.
  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
  int features = 0;

  // create test area
  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >=2* max cache line size (cache line size can't exceed min page size).
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Must be set to true so we can generate the test code.
  _features = VM_Version::all_features_m;

  // Emit code.
  void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  // Don't use R0 in ldarx.
  // Keep R3_ARG1 unmodified, it contains &field (see below).
  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
  a->fsqrt(F3, F4);                            // code[0] -> fsqrt_m
  a->fsqrts(F3, F4);                           // code[1] -> fsqrts_m
  a->isel(R7, R5, R6, 0);                      // code[2] -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
  a->cmpb(R7, R5, R6);                         // code[4] -> cmpb
  a->popcntb(R7, R5);                          // code[5] -> popcntb
  a->popcntw(R7, R5);                          // code[6] -> popcntw
  a->fcfids(F3, F4);                           // code[7] -> fcfids
  a->vand(VR0, VR0, VR0);                      // code[8] -> vand
  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16
  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
  a->tcheck(0);                                // code[12] -> tcheck
  a->blr();

  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
  a->dcbz(R3_ARG1); // R3_ARG1 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();
  _features = VM_Version::unknown_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Measure cache line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
  int count = 0; // count zeroed bytes
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _L1_data_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
  (*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
  VM_Version::_is_determine_features_test_running = false;

  // determine which instructions are legal.
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= fsqrts_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;
  if (code[feature_cntr++]) features |= lqarx_m;
  if (code[feature_cntr++]) features |= vcipher_m;
  if (code[feature_cntr++]) features |= vpmsumb_m;
  if (code[feature_cntr++]) features |= tcheck_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;
}

// Power 8: Configure Data Stream Control Register.
662void VM_Version::config_dscr() { 663 assert(has_tcheck(), "Only execute on Power 8 or later!"); 664 665 // 7 InstWords for each call (function descriptor + blr instruction). 666 const int code_size = (2+2*7)*BytesPerInstWord; 667 668 // Allocate space for the code. 669 ResourceMark rm; 670 CodeBuffer cb("config_dscr", code_size, 0); 671 MacroAssembler* a = new MacroAssembler(&cb); 672 673 // Emit code. 674 uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry(); 675 uint32_t *code = (uint32_t *)a->pc(); 676 a->mfdscr(R3); 677 a->blr(); 678 679 void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry(); 680 a->mtdscr(R3); 681 a->blr(); 682 683 uint32_t *code_end = (uint32_t *)a->pc(); 684 a->flush(); 685 686 // Print the detection code. 687 if (PrintAssembly) { 688 ttyLocker ttyl; 689 tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code)); 690 Disassembler::decode((u_char*)code, (u_char*)code_end, tty); 691 } 692 693 // Apply the configuration if needed. 
694 uint64_t dscr_val = (*get_dscr)(); 695 if (Verbose) { 696 tty->print_cr("dscr value was 0x%lx" , dscr_val); 697 } 698 bool change_requested = false; 699 if (DSCR_PPC64 != (uintx)-1) { 700 dscr_val = DSCR_PPC64; 701 change_requested = true; 702 } 703 if (DSCR_DPFD_PPC64 <= 7) { 704 uint64_t mask = 0x7; 705 if ((dscr_val & mask) != DSCR_DPFD_PPC64) { 706 dscr_val = (dscr_val & ~mask) | (DSCR_DPFD_PPC64); 707 change_requested = true; 708 } 709 } 710 if (DSCR_URG_PPC64 <= 7) { 711 uint64_t mask = 0x7 << 6; 712 if ((dscr_val & mask) != DSCR_DPFD_PPC64 << 6) { 713 dscr_val = (dscr_val & ~mask) | (DSCR_URG_PPC64 << 6); 714 change_requested = true; 715 } 716 } 717 if (change_requested) { 718 (*set_dscr)(dscr_val); 719 if (Verbose) { 720 tty->print_cr("dscr was set to 0x%lx" , (*get_dscr)()); 721 } 722 } 723} 724 725static int saved_features = 0; 726 727void VM_Version::allow_all() { 728 saved_features = _features; 729 _features = all_features_m; 730} 731 732void VM_Version::revert() { 733 _features = saved_features; 734} 735