vm_version_ppc.cpp revision 11658:8a5735c11a84
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"

# include <sys/sysinfo.h>

bool VM_Version::_is_determine_features_test_running = false;
uint64_t VM_Version::_dscr_val = 0;

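// Warn if a trap-based check was requested explicitly on the command line but has to be
// disabled because it depends on SIGTRAP support (-XX:+UseSIGTRAP).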
#define MSG(flag)   \
  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
      jio_fprintf(defaultStream::error_stream(),                       \
                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
                  "         -XX:+" #flag " will be disabled!\n");

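// Detect the CPU's features and derive, validate and, where necessary, override the
// platform-dependent flag settings from them.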
void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }

  bool PowerArchitecturePPC64_ok = false;
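  // Note the deliberate fall-through below: each case checks the instruction required for
  // its own level and, by falling through, for all lower levels as well. Only if every
  // check passes do we reach case 0 and accept the requested PowerArchitecturePPC64 value.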
  switch (PowerArchitecturePPC64) {
    case 8: if (!VM_Version::has_lqarx()  ) break;
    case 7: if (!VM_Version::has_popcntw()) break;
    case 6: if (!VM_Version::has_cmpb()   ) break;
    case 5: if (!VM_Version::has_popcntb()) break;
    case 0: PowerArchitecturePPC64_ok = true; break;
    default: break;
  }
  guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
            UINTX_FORMAT " on this machine", PowerArchitecturePPC64);

  // Power 8: Configure Data Stream Control Register.
  if (has_mfdscr()) {
    config_dscr();
  }

  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks,       false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks,     false);
  }

#ifdef COMPILER2
  if (!UseSIGTRAP) {
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

  // On Power 6, determine the section size.
  if (PowerArchitecturePPC64 == 6) {
    determine_section_size();
  // TODO: PPC port } else {
  // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
  }

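  // The vector instructions available with Power 8 are not exploited yet (see the comment
  // at the CRC32 intrinsics below), so restrict the vectorizer to 8-byte vectors.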
  MaxVectorSize = 8;
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               //(has_mftgpr()? " mftgpr"  : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " aes"     : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_tcheck()  ? " tcheck"  : ""),
               (has_mfdscr()  ? " mfdscr"  : ""),
               (has_vsx()     ? " vsx"     : "")
               // Make sure number of %s matches num_features!
              );
  _features_string = os::strdup(buf);
  if (Verbose) {
    print_features();
  }

  // PPC64 supports 8-byte compare-exchange operations (see
  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  // Used by C1.
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  UseSSE = 0; // Only on x86 and x64

  intx cache_line_size = L1_data_cache_line_size();

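  // Prefetch tuning: AllocatePrefetchStyle 4 uses the cache line size as its exact step
  // size, while the other styles only require the step size to cover at least one line.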
  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // Default is not defined?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // Default is not defined?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value.
  }

  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
  }

  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle must not be negative");

  // Implementation does not use any of the vector instructions
  // available with Power8. Their exploitation is still pending.
  if (!UseCRC32Intrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support.
#if defined(VM_LITTLE_ENDIAN)
  if (has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAES && has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

#else
  if (UseAES) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }
  if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }
#endif

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }
  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
    warning("SHA intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  // Adjust RTM (Restricted Transactional Memory) flags.
  if (UseRTMLocking) {
    // If CPU or OS are too old:
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    if (!has_tcheck()) {
      vm_exit_during_initialization("RTM instructions are not available on this CPU");
    }
    bool os_too_old = true;
#ifdef AIX
    if (os::Aix::os_version() >= 0x0701031e) { // at least AIX 7.1.3.30
      os_too_old = false;
    }
#endif
#ifdef LINUX
    // We need at least Linux kernel 4.2, where the problematic behavior of syscalls
    // being called in the middle of a transaction has been addressed.
    // Please refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
    // in the Linux kernel source tree: https://goo.gl/Kc5i7A
    if (os::Linux::os_version_is_known()) {
      if (os::Linux::os_version() >= 0x040200)
        os_too_old = false;
    } else {
      vm_exit_during_initialization("RTM cannot be enabled: kernel version is unknown.");
    }
#endif
    if (os_too_old) {
      vm_exit_during_initialization("RTM is not supported on this OS version.");
    }
  }

  if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
    if (!UnlockExperimentalVMOptions) {
      vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
                                    "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
    } else {
      warning("UseRTMLocking is only available as experimental option on this platform.");
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
    guarantee(RTMSpinLoopCount > 0, "unsupported");
#else
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
#endif
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::print_features() {
  tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
}

#ifdef COMPILER2
// Determine the section size on Power 6: If the section size is 8 instructions,
// there should be a difference of ~15% between the two test loops below. If no
// difference is detected, the section size is assumed to be 32 instructions.
void VM_Version::determine_section_size() {

  int unroll = 80;

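  // Each of the two test loops below emits 32 instructions per unrolled iteration; the
  // extra 100 instruction words are slack for setup, alignment and branch code.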
  const int code_size = (2* unroll * 32 + 100)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // Emit code.
  void (*test1)() = (void(*)())(void *)a->function_entry();

  Label l1;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1
    // ------- sector 0 ------------
    // ;; 0
    a->nop();                   // 1
    a->fpnop0();                // 2
    a->fpnop1();                // 3
    a->addi(R4,R4, -1); // 4

    // ;;  1
    a->nop();                   // 5
    a->fmr(F6, F6);             // 6
    a->fmr(F7, F7);             // 7
    a->endgroup();              // 8
    // ------- sector 8 ------------

    // ;;  2
    a->nop();                   // 9
    a->nop();                   // 10
    a->fmr(F8, F8);             // 11
    a->fmr(F9, F9);             // 12

    // ;;  3
    a->nop();                   // 13
    a->fmr(F10, F10);           // 14
    a->fmr(F11, F11);           // 15
    a->endgroup();              // 16
    // -------- sector 16 -------------

    // ;;  4
    a->nop();                   // 17
    a->nop();                   // 18
    a->fmr(F15, F15);           // 19
    a->fmr(F16, F16);           // 20

    // ;;  5
    a->nop();                   // 21
    a->fmr(F17, F17);           // 22
    a->fmr(F18, F18);           // 23
    a->endgroup();              // 24
    // ------- sector 24  ------------

    // ;;  6
    a->nop();                   // 25
    a->nop();                   // 26
    a->fmr(F19, F19);           // 27
    a->fmr(F20, F20);           // 28

    // ;;  7
    a->nop();                   // 29
    a->fmr(F21, F21);           // 30
    a->fmr(F22, F22);           // 31
    a->brnop0();                // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll);   // 33
  a->bge(CCR0, l1);             // 34
  a->blr();

  // Emit code.
  void (*test2)() = (void(*)())(void *)a->function_entry();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();                  // 1
    a->nop();                     // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();                  // 3
    a->fpnop1();                  // 4
    a->addi(R4,R4, -1);           // 5

    // ;; 1

    a->nop();                     // 6
    a->fmr(F6, F6);               // 7
    a->fmr(F7, F7);               // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();                // 9

    // ;; 3
    a->nop();                     // 10
    a->nop();                     // 11
    a->fmr(F8, F8);               // 12

    // ;; 4
    a->fmr(F9, F9);               // 13
    a->nop();                     // 14
    a->fmr(F10, F10);             // 15

    // ;; 5
    a->fmr(F11, F11);             // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();                // 17

    // ;; 7
    a->nop();                     // 18
    a->nop();                     // 19
    a->fmr(F15, F15);             // 20

    // ;; 8
    a->fmr(F16, F16);             // 21
    a->nop();                     // 22
    a->fmr(F17, F17);             // 23

    // ;; 9
    a->fmr(F18, F18);             // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();                // 25

    // ;; 11
    a->nop();                     // 26
    a->nop();                     // 27
    a->fmr(F19, F19);             // 28

    // ;; 12
    a->fmr(F20, F20);             // 29
    a->nop();                     // 30
    a->fmr(F21, F21);             // 31

    // ;; 13
    a->fmr(F22, F22);             // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l2);           // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

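  // Run both stubs and compare their thread CPU times: a relative difference above the
  // threshold below indicates a section size of 8 instructions.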
  double loop1_seconds, loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);
  loop2_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds * 100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else {
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
#endif // COMPILER2

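// Determine the CPU features by executing one candidate instruction per feature in a
// generated stub. Instructions that are illegal on this CPU are overwritten with 0 by the
// signal handler (while _is_determine_features_test_running is set), so a non-zero
// instruction word afterwards means the feature is available. The stub also measures the
// L1 data cache line size via dcbz.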
void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
  // 1 InstWord per call for the blr instruction.
  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
  int features = 0;

  // Create the test area.
  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >=2* max cache line size (cache line size can't exceed min page size).
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Must be set to true so we can generate the test code.
  _features = VM_Version::all_features_m;

  // Emit code.
  void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  // Don't use R0 in ldarx.
  // Keep R3_ARG1 unmodified, it contains &field (see below).
  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
  a->fsqrt(F3, F4);                            // code[0]  -> fsqrt_m
  a->fsqrts(F3, F4);                           // code[1]  -> fsqrts_m
  a->isel(R7, R5, R6, 0);                      // code[2]  -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3]  -> lxarx_m
  a->cmpb(R7, R5, R6);                         // code[4]  -> cmpb
  a->popcntb(R7, R5);                          // code[5]  -> popcntb
  a->popcntw(R7, R5);                          // code[6]  -> popcntw
  a->fcfids(F3, F4);                           // code[7]  -> fcfids
  a->vand(VR0, VR0, VR0);                      // code[8]  -> vand
  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16
  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
  a->tcheck(0);                                // code[12] -> tcheck
  a->mfdscr(R0);                               // code[13] -> mfdscr
  a->lxvd2x(VSR0, 0, R3_ARG1);                 // code[14] -> vsx
  a->blr();

  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
  a->dcbz(R3_ARG1); // R3_ARG1 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();
  _features = VM_Version::unknown_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Measure cache line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
  int count = 0; // count zeroed bytes
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _L1_data_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
  (*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
  VM_Version::_is_determine_features_test_running = false;

  // Determine which instructions are legal.
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= fsqrts_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;
  if (code[feature_cntr++]) features |= lqarx_m;
  if (code[feature_cntr++]) features |= vcipher_m;
  if (code[feature_cntr++]) features |= vpmsumb_m;
  if (code[feature_cntr++]) features |= tcheck_m;
  if (code[feature_cntr++]) features |= mfdscr_m;
  if (code[feature_cntr++]) features |= vsx_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;
}

// Power 8: Configure Data Stream Control Register.
void VM_Version::config_dscr() {
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (2+2*7)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("config_dscr", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Emit code.
  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  a->mfdscr(R3);
  a->blr();

  void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
  a->mtdscr(R3);
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Print the configuration code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Apply the configuration if needed.
  _dscr_val = (*get_dscr)();
  if (Verbose) {
    tty->print_cr("dscr value was 0x%lx", _dscr_val);
  }
  bool change_requested = false;
  if (DSCR_PPC64 != (uintx)-1) {
    _dscr_val = DSCR_PPC64;
    change_requested = true;
  }
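  // Override the DPFD (default prefetch depth) field in the low 3 bits and the URG
  // (urgency) field at bits 6..8 of the DSCR if requested; flag values > 7 leave the
  // corresponding field unchanged.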
  if (DSCR_DPFD_PPC64 <= 7) {
    uint64_t mask = 0x7;
    if ((_dscr_val & mask) != DSCR_DPFD_PPC64) {
      _dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64);
      change_requested = true;
    }
  }
  if (DSCR_URG_PPC64 <= 7) {
    uint64_t mask = 0x7 << 6;
    if ((_dscr_val & mask) != DSCR_URG_PPC64 << 6) {
      _dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
      change_requested = true;
    }
  }
  if (change_requested) {
    (*set_dscr)(_dscr_val);
    if (Verbose) {
      tty->print_cr("dscr was set to 0x%lx", (*get_dscr)());
    }
  }
}

static uint64_t saved_features = 0;

void VM_Version::allow_all() {
  saved_features = _features;
  _features      = all_features_m;
}

void VM_Version::revert() {
  _features = saved_features;
}