/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22101099Srwatson * 23101099Srwatson */ 24101099Srwatson 25101099Srwatson#ifndef SHARE_VM_RUNTIME_GLOBALS_HPP 26101099Srwatson#define SHARE_VM_RUNTIME_GLOBALS_HPP 27101099Srwatson 28101099Srwatson#include "utilities/globalDefinitions.hpp" 29101099Srwatson#include "utilities/macros.hpp" 30101099Srwatson 31101099Srwatson#include <float.h> // for DBL_MAX 32101099Srwatson 33101099Srwatson// use this for flags that are true per default in the tiered build 34101099Srwatson// but false in non-tiered builds, and vice versa 35101099Srwatson#ifdef TIERED 36101099Srwatson#define trueInTiered true 37101099Srwatson#define falseInTiered false 38101099Srwatson#else 39101099Srwatson#define trueInTiered false 40101099Srwatson#define falseInTiered true 41101099Srwatson#endif 42101099Srwatson 43101099Srwatson#include CPU_HEADER(globals) 44101099Srwatson#include OS_HEADER(globals) 45101099Srwatson#include OS_CPU_HEADER(globals) 46105988Srwatson#ifdef COMPILER1 47101099Srwatson#include CPU_HEADER(c1_globals) 48101099Srwatson#include OS_HEADER(c1_globals) 49103183Sbde#endif 50145076Scsjp#ifdef COMPILER2 51101099Srwatson#include CPU_HEADER(c2_globals) 52101099Srwatson#include OS_HEADER(c2_globals) 53115497Srwatson#endif 54101099Srwatson#ifdef SHARK 55101099Srwatson#ifdef ZERO 56101099Srwatson# include "shark_globals_zero.hpp" 57105696Srwatson#endif 58101099Srwatson#endif 59101099Srwatson 60101099Srwatson#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !INCLUDE_JVMCI 61101099Srwatsondefine_pd_global(bool, BackgroundCompilation, false); 62101099Srwatsondefine_pd_global(bool, UseTLAB, false); 63150340Sphkdefine_pd_global(bool, CICompileOSR, false); 64101099Srwatsondefine_pd_global(bool, UseTypeProfile, false); 65140628Srwatsondefine_pd_global(bool, UseOnStackReplacement, false); 66140628Srwatsondefine_pd_global(bool, InlineIntrinsics, false); 67140628Srwatsondefine_pd_global(bool, PreferInterpreterNativeStubs, true); 68101099Srwatsondefine_pd_global(bool, 
ProfileInterpreter, false); 69145855Srwatsondefine_pd_global(bool, ProfileTraps, false); 70145855Srwatsondefine_pd_global(bool, TieredCompilation, false); 71101099Srwatson 72101099Srwatsondefine_pd_global(intx, CompileThreshold, 0); 73101099Srwatson 74101099Srwatsondefine_pd_global(intx, OnStackReplacePercentage, 0); 75101099Srwatsondefine_pd_global(bool, ResizeTLAB, false); 76101099Srwatsondefine_pd_global(intx, FreqInlineSize, 0); 77101099Srwatsondefine_pd_global(size_t, NewSizeThreadIncrease, 4*K); 78101099Srwatsondefine_pd_global(intx, InlineClassNatives, true); 79122875Srwatsondefine_pd_global(intx, InlineUnsafeOps, true); 80101099Srwatsondefine_pd_global(intx, InitialCodeCacheSize, 160*K); 81101099Srwatsondefine_pd_global(intx, ReservedCodeCacheSize, 32*M); 82122879Srwatsondefine_pd_global(intx, NonProfiledCodeHeapSize, 0); 83101099Srwatsondefine_pd_global(intx, ProfiledCodeHeapSize, 0); 84101099Srwatsondefine_pd_global(intx, NonNMethodCodeHeapSize, 32*M); 85101099Srwatson 86101099Srwatsondefine_pd_global(intx, CodeCacheExpansionSize, 32*K); 87101099Srwatsondefine_pd_global(intx, CodeCacheMinBlockLength, 1); 88101099Srwatsondefine_pd_global(intx, CodeCacheMinimumUseSpace, 200*K); 89101099Srwatsondefine_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M)); 90101099Srwatsondefine_pd_global(bool, NeverActAsServerClassMachine, true); 91101099Srwatsondefine_pd_global(uint64_t,MaxRAM, 1ULL*G); 92101099Srwatson#define CI_COMPILER_COUNT 0 93101099Srwatson#else 94105988Srwatson 95105988Srwatson#if defined(COMPILER2) || INCLUDE_JVMCI 96105988Srwatson#define CI_COMPILER_COUNT 2 97105988Srwatson#else 98107731Srwatson#define CI_COMPILER_COUNT 1 99101099Srwatson#endif // COMPILER2 || INCLUDE_JVMCI 100101099Srwatson 101102980Srwatson#endif // no compilers 102101099Srwatson 103101099Srwatson// string type aliases used only in this file 104101099Srwatsontypedef const char* ccstr; 105101099Srwatsontypedef const char* ccstrlist; // represents string arguments which 
accumulate 106101099Srwatson 107101099Srwatson// function type that will construct default range string 108101099Srwatsontypedef const char* (*RangeStrFunc)(void); 109101099Srwatson 110101099Srwatsonstruct Flag { 111101099Srwatson enum Flags { 112101099Srwatson // latest value origin 113101099Srwatson DEFAULT = 0, 114101099Srwatson COMMAND_LINE = 1, 115101099Srwatson ENVIRON_VAR = 2, 116101099Srwatson CONFIG_FILE = 3, 117101099Srwatson MANAGEMENT = 4, 118105643Srwatson ERGONOMIC = 5, 119105643Srwatson ATTACH_ON_DEMAND = 6, 120105643Srwatson INTERNAL = 7, 121105643Srwatson 122105606Srwatson LAST_VALUE_ORIGIN = INTERNAL, 123105606Srwatson VALUE_ORIGIN_BITS = 4, 124105606Srwatson VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS), 125105606Srwatson 126105606Srwatson // flag kind 127153927Scsjp KIND_PRODUCT = 1 << 4, 128153927Scsjp KIND_MANAGEABLE = 1 << 5, 129153927Scsjp KIND_DIAGNOSTIC = 1 << 6, 130153927Scsjp KIND_EXPERIMENTAL = 1 << 7, 131153927Scsjp KIND_NOT_PRODUCT = 1 << 8, 132105637Srwatson KIND_DEVELOP = 1 << 9, 133101099Srwatson KIND_PLATFORM_DEPENDENT = 1 << 10, 134105637Srwatson KIND_READ_WRITE = 1 << 11, 135105637Srwatson KIND_C1 = 1 << 12, 136101099Srwatson KIND_C2 = 1 << 13, 137101099Srwatson KIND_ARCH = 1 << 14, 138101099Srwatson KIND_SHARK = 1 << 15, 139132781Skan KIND_LP64_PRODUCT = 1 << 16, 140101099Srwatson KIND_COMMERCIAL = 1 << 17, 141122879Srwatson KIND_JVMCI = 1 << 18, 142101099Srwatson 143105643Srwatson // set this bit if the flag was set on the command line 144105643Srwatson ORIG_COMMAND_LINE = 1 << 19, 145105643Srwatson 146105643Srwatson KIND_MASK = ~(VALUE_ORIGIN_MASK | ORIG_COMMAND_LINE) 147105643Srwatson }; 148105643Srwatson 149105643Srwatson enum Error { 150105643Srwatson // no error 151105643Srwatson SUCCESS = 0, 152105643Srwatson // flag name is missing 153101099Srwatson MISSING_NAME, 154104514Srwatson // flag value is missing 155101099Srwatson MISSING_VALUE, 156101099Srwatson // error parsing the textual form of the value 
157122879Srwatson WRONG_FORMAT, 158101099Srwatson // flag is not writable 159101099Srwatson NON_WRITABLE, 160101099Srwatson // flag value is outside of its bounds 161101099Srwatson OUT_OF_BOUNDS, 162101099Srwatson // flag value violates its constraint 163101099Srwatson VIOLATES_CONSTRAINT, 164101099Srwatson // there is no flag with the given name 165122879Srwatson INVALID_FLAG, 166101099Srwatson // the flag can only be set only on command line during invocation of the VM 167101099Srwatson COMMAND_LINE_ONLY, 168101099Srwatson // the flag may only be set once 169101099Srwatson SET_ONLY_ONCE, 170101099Srwatson // the flag is not writable in this combination of product/debug build 171105634Srwatson CONSTANT, 172105634Srwatson // other, unspecified error related to setting the flag 173105634Srwatson ERR_OTHER 174105634Srwatson }; 175105634Srwatson 176105634Srwatson enum MsgType { 177105634Srwatson NONE = 0, 178105634Srwatson DIAGNOSTIC_FLAG_BUT_LOCKED, 179105634Srwatson EXPERIMENTAL_FLAG_BUT_LOCKED, 180101099Srwatson DEVELOPER_FLAG_BUT_PRODUCT_BUILD, 181101099Srwatson NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD 182101099Srwatson }; 183105643Srwatson 184101099Srwatson const char* _type; 185105736Srwatson const char* _name; 186101099Srwatson void* _addr; 187101099Srwatson NOT_PRODUCT(const char* _doc;) 188101099Srwatson Flags _flags; 189101099Srwatson size_t _name_len; 190101099Srwatson 191101099Srwatson // points to all Flags static array 192101099Srwatson static Flag* flags; 193101099Srwatson 194101099Srwatson // number of flags 195101099Srwatson static size_t numFlags; 196101099Srwatson 197101099Srwatson static Flag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); }; 198101099Srwatson static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false); 199101099Srwatson static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); 200101099Srwatson 201101099Srwatson static const 
char* get_int_default_range_str(); 202101099Srwatson static const char* get_uint_default_range_str(); 203101099Srwatson static const char* get_intx_default_range_str(); 204101099Srwatson static const char* get_uintx_default_range_str(); 205101099Srwatson static const char* get_uint64_t_default_range_str(); 206101099Srwatson static const char* get_size_t_default_range_str(); 207101099Srwatson static const char* get_double_default_range_str(); 208101099Srwatson 209101099Srwatson Flag::Error check_writable(bool changed); 210101099Srwatson 211101099Srwatson bool is_bool() const; 212101099Srwatson bool get_bool() const; 213101099Srwatson Flag::Error set_bool(bool value); 214105643Srwatson 215105643Srwatson bool is_int() const; 216105643Srwatson int get_int() const; 217105643Srwatson Flag::Error set_int(int value); 218105643Srwatson 219101099Srwatson bool is_uint() const; 220101099Srwatson uint get_uint() const; 221101099Srwatson Flag::Error set_uint(uint value); 222101099Srwatson 223101099Srwatson bool is_intx() const; 224101099Srwatson intx get_intx() const; 225101099Srwatson Flag::Error set_intx(intx value); 226101099Srwatson 227101099Srwatson bool is_uintx() const; 228101099Srwatson uintx get_uintx() const; 229101099Srwatson Flag::Error set_uintx(uintx value); 230101099Srwatson 231101099Srwatson bool is_uint64_t() const; 232101099Srwatson uint64_t get_uint64_t() const; 233105988Srwatson Flag::Error set_uint64_t(uint64_t value); 234105988Srwatson 235105988Srwatson bool is_size_t() const; 236105988Srwatson size_t get_size_t() const; 237132232Srwatson Flag::Error set_size_t(size_t value); 238132232Srwatson 239132232Srwatson bool is_double() const; 240105988Srwatson double get_double() const; 241105988Srwatson Flag::Error set_double(double value); 242105988Srwatson 243105988Srwatson bool is_ccstr() const; 244105988Srwatson bool ccstr_accumulates() const; 245105988Srwatson ccstr get_ccstr() const; 246101099Srwatson Flag::Error set_ccstr(ccstr value); 247101099Srwatson 
248101099Srwatson Flags get_origin(); 249101099Srwatson void set_origin(Flags origin); 250101099Srwatson 251101099Srwatson size_t get_name_length(); 252101099Srwatson 253101099Srwatson bool is_default(); 254101099Srwatson bool is_ergonomic(); 255101099Srwatson bool is_command_line(); 256136774Srwatson void set_command_line(); 257136774Srwatson 258101099Srwatson bool is_product() const; 259101099Srwatson bool is_manageable() const; 260132232Srwatson bool is_diagnostic() const; 261132232Srwatson bool is_experimental() const; 262103750Srwatson bool is_notproduct() const; 263132232Srwatson bool is_develop() const; 264101099Srwatson bool is_read_write() const; 265101099Srwatson bool is_commercial() const; 266132232Srwatson 267132232Srwatson bool is_constant_in_binary() const; 268101099Srwatson 269101099Srwatson bool is_unlocker() const; 270101099Srwatson bool is_unlocked() const; 271101099Srwatson bool is_writeable() const; 272101099Srwatson bool is_external() const; 273101099Srwatson 274132232Srwatson bool is_unlocker_ext() const; 275101099Srwatson bool is_unlocked_ext() const; 276132232Srwatson bool is_writeable_ext() const; 277132232Srwatson bool is_external_ext() const; 278132232Srwatson 279132232Srwatson void clear_diagnostic(); 280101099Srwatson 281132232Srwatson Flag::MsgType get_locked_message(char*, int) const; 282101099Srwatson void get_locked_message_ext(char*, int) const; 283101099Srwatson 284101099Srwatson // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 285101099Srwatson void print_on(outputStream* st, bool withComments = false, bool printRanges = false); 286101099Srwatson void print_kind_and_origin(outputStream* st); 287101099Srwatson void print_as_flag(outputStream* st); 288101099Srwatson 289101099Srwatson static const char* flag_error_str(Flag::Error error); 290101099Srwatson}; 291101099Srwatson 292101099Srwatson// debug flags control various aspects of the VM and are global accessible 
293101099Srwatson 294101099Srwatson// use FlagSetting to temporarily change some debug flag 295101099Srwatson// e.g. FlagSetting fs(DebugThisAndThat, true); 296132232Srwatson// restored to previous value upon leaving scope 297101099Srwatsonclass FlagSetting { 298101099Srwatson bool val; 299132232Srwatson bool* flag; 300132232Srwatson public: 301132232Srwatson FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; } 302132232Srwatson ~FlagSetting() { *flag = val; } 303101099Srwatson}; 304132232Srwatson 305101099Srwatson 306101099Srwatsonclass CounterSetting { 307101099Srwatson intx* counter; 308105634Srwatson public: 309105634Srwatson CounterSetting(intx* cnt) { counter = cnt; (*counter)++; } 310105634Srwatson ~CounterSetting() { (*counter)--; } 311132232Srwatson}; 312132232Srwatson 313105634Srwatsonclass IntFlagSetting { 314105634Srwatson int val; 315105634Srwatson int* flag; 316105634Srwatson public: 317105634Srwatson IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; } 318105634Srwatson ~IntFlagSetting() { *flag = val; } 319105637Srwatson}; 320105634Srwatson 321105634Srwatsonclass UIntFlagSetting { 322105634Srwatson uint val; 323105634Srwatson uint* flag; 324105634Srwatson public: 325105634Srwatson UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val = fl; fl = newValue; } 326106090Srwatson ~UIntFlagSetting() { *flag = val; } 327105634Srwatson}; 328105634Srwatson 329105634Srwatsonclass UIntXFlagSetting { 330105634Srwatson uintx val; 331106090Srwatson uintx* flag; 332105634Srwatson public: 333132232Srwatson UIntXFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; } 334132232Srwatson ~UIntXFlagSetting() { *flag = val; } 335105634Srwatson}; 336105634Srwatson 337105634Srwatsonclass DoubleFlagSetting { 338105634Srwatson double val; 339105634Srwatson double* flag; 340105634Srwatson public: 341105634Srwatson DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; 
} 342105634Srwatson ~DoubleFlagSetting() { *flag = val; } 343105634Srwatson}; 344105634Srwatson 345105634Srwatsonclass SizeTFlagSetting { 346105634Srwatson size_t val; 347105634Srwatson size_t* flag; 348105634Srwatson public: 349105634Srwatson SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; } 350105634Srwatson ~SizeTFlagSetting() { *flag = val; } 351106091Srwatson}; 352132232Srwatson 353105988Srwatson// Helper class for temporarily saving the value of a flag during a scope. 354105988Srwatsontemplate <size_t SIZE> 355132232Srwatsonclass FlagGuard { 356132232Srwatson unsigned char _value[SIZE]; 357105988Srwatson void* const _addr; 358132232Srwatson 359105988Srwatson // Hide operator new, this class should only be allocated on the stack. 360105988Srwatson // NOTE: Cannot include memory/allocation.hpp here due to circular 361105634Srwatson // dependencies. 362101099Srwatson void* operator new(size_t size) throw(); 363101099Srwatson void* operator new [](size_t size) throw(); 364101099Srwatson 365132232Srwatson public: 366132232Srwatson FlagGuard(void* flag_addr) : _addr(flag_addr) { 367101099Srwatson memcpy(_value, _addr, SIZE); 368101099Srwatson } 369101099Srwatson 370101099Srwatson ~FlagGuard() { 371101099Srwatson memcpy(_addr, _value, SIZE); 372101099Srwatson } 373132232Srwatson}; 374105643Srwatson 375132232Srwatson#define FLAG_GUARD(f) FlagGuard<sizeof(f)> f ## _guard(&f) 376101099Srwatson 377101099Srwatsonclass CommandLineFlags { 378101099Srwatsonpublic: 379101099Srwatson static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false); 380101099Srwatson static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); } 381101099Srwatson static Flag::Error boolAtPut(Flag* flag, bool* value, Flag::Flags origin); 382101099Srwatson static Flag::Error 
boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin); 383132232Srwatson static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } 384101099Srwatson 385101099Srwatson static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false); 386101099Srwatson static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); } 387101099Srwatson static Flag::Error intAtPut(Flag* flag, int* value, Flag::Flags origin); 388101099Srwatson static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin); 389101099Srwatson static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); } 390101099Srwatson 391101099Srwatson static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false); 392101099Srwatson static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); } 393101099Srwatson static Flag::Error uintAtPut(Flag* flag, uint* value, Flag::Flags origin); 394101099Srwatson static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin); 395105643Srwatson static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); } 396105643Srwatson 397105643Srwatson static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false); 398101099Srwatson static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, 
return_flag); } 399101099Srwatson static Flag::Error intxAtPut(Flag* flag, intx* value, Flag::Flags origin); 400101099Srwatson static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin); 401101099Srwatson static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } 402101099Srwatson 403101099Srwatson static Flag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false); 404101099Srwatson static Flag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); } 405101099Srwatson static Flag::Error uintxAtPut(Flag* flag, uintx* value, Flag::Flags origin); 406101099Srwatson static Flag::Error uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin); 407101099Srwatson static Flag::Error uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); } 408101099Srwatson 409101099Srwatson static Flag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false); 410101099Srwatson static Flag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); } 411101099Srwatson static Flag::Error size_tAtPut(Flag* flag, size_t* value, Flag::Flags origin); 412105643Srwatson static Flag::Error size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin); 413105643Srwatson static Flag::Error size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); } 414105643Srwatson 415101099Srwatson static Flag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool 
return_flag = false); 416101099Srwatson static Flag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); } 417101099Srwatson static Flag::Error uint64_tAtPut(Flag* flag, uint64_t* value, Flag::Flags origin); 418101099Srwatson static Flag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin); 419101099Srwatson static Flag::Error uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } 420101099Srwatson 421101099Srwatson static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false); 422101099Srwatson static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); } 423101099Srwatson static Flag::Error doubleAtPut(Flag* flag, double* value, Flag::Flags origin); 424101099Srwatson static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin); 425101099Srwatson static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } 426101099Srwatson 427101099Srwatson static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false); 428101099Srwatson static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); } 429101099Srwatson // Contract: Flag will make private copy of the incoming value. 430101099Srwatson // Outgoing value is always malloc-ed, and caller MUST call free. 
431101099Srwatson static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin); 432101099Srwatson static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); } 433101099Srwatson 434101099Srwatson // Returns false if name is not a command line flag. 435105643Srwatson static bool wasSetOnCmdline(const char* name, bool* value); 436105643Srwatson static void printSetFlags(outputStream* out); 437101099Srwatson 438101099Srwatson // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 439101099Srwatson static void printFlags(outputStream* out, bool withComments, bool printRanges = false); 440101099Srwatson 441105643Srwatson static void verify() PRODUCT_RETURN; 442105643Srwatson}; 443105643Srwatson 444105643Srwatson// use this for flags that are true by default in the debug version but 445101099Srwatson// false in the optimized version, and vice versa 446101099Srwatson#ifdef ASSERT 447105643Srwatson#define trueInDebug true 448105643Srwatson#define falseInDebug false 449105643Srwatson#else 450105643Srwatson#define trueInDebug false 451101099Srwatson#define falseInDebug true 452101099Srwatson#endif 453101099Srwatson 454101099Srwatson// use this for flags that are true per default in the product build 455132232Srwatson// but false in development builds, and vice versa 456105643Srwatson#ifdef PRODUCT 457101099Srwatson#define trueInProduct true 458101099Srwatson#define falseInProduct false 459132232Srwatson#else 460132232Srwatson#define trueInProduct false 461105643Srwatson#define falseInProduct true 462132232Srwatson#endif 463132232Srwatson 464132232Srwatson// develop flags are settable / visible only during development and are constant in the PRODUCT version 465101099Srwatson// product flags are always settable / visible 466101099Srwatson// notproduct flags are settable / visible only during development and are not 
// declared in the PRODUCT version

// A flag must be declared with one of the following types:
// bool, int, uint, intx, uintx, size_t, ccstr, double, or uint64_t.
// The type "ccstr" is an alias for "const char*" and is used
// only in this file, because the macrology requires single-token type names.

// Note: Diagnostic options not meant for VM tuning or for product modes.
// They are to be used for VM quality assurance or field diagnosis
// of VM bugs. They are hidden so that users will not be encouraged to
// try them as if they were VM ordinary execution options. However, they
// are available in the product version of the VM. Under instruction
// from support engineers, VM customers can turn them on to collect
// diagnostic information about VM problems. To use a VM diagnostic
// option, you must first specify +UnlockDiagnosticVMOptions.
// (This master switch also affects the behavior of -Xprintflags.)
//
// experimental flags are in support of features that are not
// part of the officially supported product, but are available
// for experimenting with. They could, for example, be performance
// features that may not have undergone full or rigorous QA, but which may
// help performance in some cases and released for experimentation
// by the community of users and developers. This flag also allows one to
// be able to build a fully supported product that nonetheless also
// ships with some unsupported, lightly tested, experimental features.
// Like the UnlockDiagnosticVMOptions flag above, there is a corresponding
// UnlockExperimentalVMOptions flag, which allows the control and
// modification of the experimental flags.
//
// Nota bene: neither diagnostic nor experimental options should be used casually,
// and they are not supported on production loads, except under explicit
// direction from support engineers.
//
// manageable flags are writeable external product flags.
// They are dynamically writeable through the JDK management interface
// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole.
// These flags are external exported interface (see CCC). The list of
// manageable flags can be queried programmatically through the management
// interface.
//
// A flag can be made as "manageable" only if
// - the flag is defined in a CCC as an external exported interface.
// - the VM implementation supports dynamic setting of the flag.
//   This implies that the VM must *always* query the flag variable
//   and not reuse state related to the flag state at any given time.
// - you want the flag to be queried programmatically by the customers.
//
// product_rw flags are writeable internal product flags.
// They are like "manageable" flags but for internal/private use.
// The list of product_rw flags are internal/private flags which
// may be changed/removed in a future release. It can be set
// through the management interface to get/set value
// when the name of flag is supplied.
519101099Srwatson// 520101099Srwatson// A flag can be made as "product_rw" only if 521101099Srwatson// - the VM implementation supports dynamic setting of the flag. 522104514Srwatson// This implies that the VM must *always* query the flag variable 523101099Srwatson// and not reuse state related to the flag state at any given time. 524101099Srwatson// 525132781Skan// Note that when there is a need to support develop flags to be writeable, 526101099Srwatson// it can be done in the same way as product_rw. 527101099Srwatson// 528101099Srwatson// range is a macro that will expand to min and max arguments for range 529101099Srwatson// checking code if provided - see commandLineFlagRangeList.hpp 530101099Srwatson// 531101099Srwatson// constraint is a macro that will expand to custom function call 532101099Srwatson// for constraint checking if provided - see commandLineFlagConstraintList.hpp 533104514Srwatson// 534101099Srwatson// writeable is a macro that controls if and how the value can change during the runtime 535101099Srwatson// 536101099Srwatson// writeable(Always) is optional and allows the flag to have its value changed 537132781Skan// without any limitations at any time 538101099Srwatson// 539101099Srwatson// writeable(Once) flag value's can be only set once during the lifetime of VM 540105696Srwatson// 541115497Srwatson// writeable(CommandLineOnly) flag value's can be only set from command line 542115497Srwatson// (multiple times allowed) 543115497Srwatson// 544105696Srwatson 545115497Srwatson 546115497Srwatson#define RUNTIME_FLAGS(develop, \ 547105696Srwatson develop_pd, \ 548115497Srwatson product, \ 549105696Srwatson product_pd, \ 550105696Srwatson diagnostic, \ 551105696Srwatson diagnostic_pd, \ 552115497Srwatson experimental, \ 553105696Srwatson notproduct, \ 554105696Srwatson manageable, \ 555115497Srwatson product_rw, \ 556105696Srwatson lp64_product, \ 557105696Srwatson range, \ 558115497Srwatson constraint, \ 559105696Srwatson writeable) \ 
560105696Srwatson \ 561115497Srwatson lp64_product(bool, UseCompressedOops, false, \ 562115497Srwatson "Use 32-bit object references in 64-bit VM. " \ 563115497Srwatson "lp64_product means flag is always constant in 32 bit VM") \ 564115497Srwatson \ 565115497Srwatson lp64_product(bool, UseCompressedClassPointers, false, \ 566115497Srwatson "Use 32-bit class pointers in 64-bit VM. " \ 567115497Srwatson "lp64_product means flag is always constant in 32 bit VM") \ 568115497Srwatson \ 569115497Srwatson notproduct(bool, CheckCompressedOops, true, \ 570115497Srwatson "Generate checks in encoding/decoding code in debug VM") \ 571115497Srwatson \ 572115497Srwatson product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \ 573115497Srwatson "Heap allocation steps through preferred address regions to find" \ 574115497Srwatson " where it can allocate the heap. Number of steps to take per " \ 575115497Srwatson "region.") \ 576115497Srwatson range(1, max_uintx) \ 577115497Srwatson \ 578105696Srwatson lp64_product(intx, ObjectAlignmentInBytes, 8, \ 579115497Srwatson "Default object alignment in bytes, 8 is minimum") \ 580105696Srwatson range(8, 256) \ 581105696Srwatson constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \ 582105696Srwatson \ 583105696Srwatson product(bool, AssumeMP, false, \ 584105696Srwatson "Instruct the VM to assume multiple processors are available") \ 585105696Srwatson \ 586105696Srwatson /* UseMembar is theoretically a temp flag used for memory barrier */ \ 587115497Srwatson /* removal testing. 
It was supposed to be removed before FCS but has */ \ 588116701Srwatson /* been re-added (see 6401008) */ \ 589116701Srwatson product_pd(bool, UseMembar, \ 590116701Srwatson "(Unstable) Issues membars on thread state transitions") \ 591116701Srwatson \ 592116701Srwatson develop(bool, CleanChunkPoolAsync, true, \ 593115497Srwatson "Clean the chunk pool asynchronously") \ 594101099Srwatson \ 595116701Srwatson experimental(bool, AlwaysSafeConstructors, false, \ 596101099Srwatson "Force safe construction, as if all fields are final.") \ 597105696Srwatson \ 598132232Srwatson diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \ 599132232Srwatson "Enable normal processing of flags relating to field diagnostics")\ 600115497Srwatson \ 601105696Srwatson experimental(bool, UnlockExperimentalVMOptions, false, \ 602105696Srwatson "Enable normal processing of flags relating to experimental " \ 603105696Srwatson "features") \ 604105696Srwatson \ 605116701Srwatson product(bool, JavaMonitorsInStackTrace, true, \ 606105696Srwatson "Print information about Java monitor locks when the stacks are" \ 607105696Srwatson "dumped") \ 608116701Srwatson \ 609115497Srwatson product_pd(bool, UseLargePages, \ 610105696Srwatson "Use large page memory") \ 611105696Srwatson \ 612116701Srwatson product_pd(bool, UseLargePagesIndividualAllocation, \ 613105696Srwatson "Allocate large pages individually for better affinity") \ 614105696Srwatson \ 615116701Srwatson develop(bool, LargePagesIndividualAllocationInjectError, false, \ 616115497Srwatson "Fail large pages individual allocation") \ 617105696Srwatson \ 618105696Srwatson product(bool, UseLargePagesInMetaspace, false, \ 619116701Srwatson "Use large page memory in metaspace. 
" \ 620105696Srwatson "Only used if UseLargePages is enabled.") \ 621105696Srwatson \ 622105696Srwatson product(bool, UseNUMA, false, \ 623105696Srwatson "Use NUMA if available") \ 624105696Srwatson \ 625105696Srwatson product(bool, UseNUMAInterleaving, false, \ 626105696Srwatson "Interleave memory across NUMA nodes if available") \ 627105696Srwatson \ 628116701Srwatson product(size_t, NUMAInterleaveGranularity, 2*M, \ 629105696Srwatson "Granularity to use for NUMA interleaving on Windows OS") \ 630101099Srwatson range(os::vm_allocation_granularity(), NOT_LP64(2*G) LP64_ONLY(8192*G)) \ 631101099Srwatson \ 632105696Srwatson product(bool, ForceNUMA, false, \ 633105696Srwatson "Force NUMA optimizations on single-node/UMA systems") \ 634105696Srwatson \ 635105696Srwatson product(uintx, NUMAChunkResizeWeight, 20, \ 636105696Srwatson "Percentage (0-100) used to weight the current sample when " \ 637101099Srwatson "computing exponentially decaying average for " \ 638116701Srwatson "AdaptiveNUMAChunkSizing") \ 639105696Srwatson range(0, 100) \ 640105696Srwatson \ 641105696Srwatson product(size_t, NUMASpaceResizeRate, 1*G, \ 642105696Srwatson "Do not reallocate more than this amount per collection") \ 643101099Srwatson range(0, max_uintx) \ 644115395Srwatson \ 645115395Srwatson product(bool, UseAdaptiveNUMAChunkSizing, true, \ 646105696Srwatson "Enable adaptive chunk sizing for NUMA") \ 647105696Srwatson \ 648105696Srwatson product(bool, NUMAStats, false, \ 649105696Srwatson "Print NUMA stats in detailed heap information") \ 650105696Srwatson \ 651105696Srwatson product(uintx, NUMAPageScanRate, 256, \ 652105696Srwatson "Maximum number of pages to include in the page scan procedure") \ 653105696Srwatson range(0, max_uintx) \ 654105696Srwatson \ 655105696Srwatson product_pd(bool, NeedsDeoptSuspend, \ 656105696Srwatson "True for register window machines (sparc/ia64)") \ 657105696Srwatson \ 658105696Srwatson product(intx, UseSSE, 99, \ 659105696Srwatson "Highest supported SSE 
instructions set on x86/x64") \ 660115395Srwatson range(0, 99) \ 661105696Srwatson \ 662115395Srwatson product(bool, UseAES, false, \ 663115395Srwatson "Control whether AES instructions are used when available") \ 664115395Srwatson \ 665115395Srwatson product(bool, UseFMA, false, \ 666115395Srwatson "Control whether FMA instructions are used when available") \ 667115395Srwatson \ 668105696Srwatson product(bool, UseSHA, false, \ 669115395Srwatson "Control whether SHA instructions are used when available") \ 670115395Srwatson \ 671115395Srwatson diagnostic(bool, UseGHASHIntrinsics, false, \ 672105696Srwatson "Use intrinsics for GHASH versions of crypto") \ 673115395Srwatson \ 674115395Srwatson product(size_t, LargePageSizeInBytes, 0, \ 675115395Srwatson "Large page size (0 to let VM choose the page size)") \ 676115395Srwatson range(0, max_uintx) \ 677115395Srwatson \ 678115395Srwatson product(size_t, LargePageHeapSizeThreshold, 128*M, \ 679115395Srwatson "Use large pages if maximum heap is at least this big") \ 680115395Srwatson range(0, max_uintx) \ 681115395Srwatson \ 682105696Srwatson product(bool, ForceTimeHighResolution, false, \ 683115395Srwatson "Using high time resolution (for Win32 only)") \ 684115395Srwatson \ 685115395Srwatson develop(bool, TracePcPatching, false, \ 686105696Srwatson "Trace usage of frame::patch_pc") \ 687115395Srwatson \ 688105696Srwatson develop(bool, TraceRelocator, false, \ 689115395Srwatson "Trace the bytecode relocator") \ 690105696Srwatson \ 691105696Srwatson develop(bool, TraceLongCompiles, false, \ 692105696Srwatson "Print out every time compilation is longer than " \ 693105696Srwatson "a given threshold") \ 694105696Srwatson \ 695105696Srwatson develop(bool, SafepointALot, false, \ 696105696Srwatson "Generate a lot of safepoints. 
This works with " \ 697105696Srwatson "GuaranteedSafepointInterval") \ 698105696Srwatson \ 699105696Srwatson product_pd(bool, BackgroundCompilation, \ 700105696Srwatson "A thread requesting compilation is not blocked during " \ 701105696Srwatson "compilation") \ 702105696Srwatson \ 703132232Srwatson product(bool, PrintVMQWaitTime, false, \ 704101099Srwatson "Print out the waiting time in VM operation queue") \ 705101099Srwatson \ 706132232Srwatson develop(bool, TraceOopMapGeneration, false, \ 707132232Srwatson "Show OopMapGeneration") \ 708132232Srwatson \ 709115395Srwatson product(bool, MethodFlushing, true, \ 710115395Srwatson "Reclamation of zombie and not-entrant methods") \ 711115395Srwatson \ 712115395Srwatson develop(bool, VerifyStack, false, \ 713105696Srwatson "Verify stack of each thread when it is entering a runtime call") \ 714115395Srwatson \ 715115395Srwatson diagnostic(bool, ForceUnreachable, false, \ 716105696Srwatson "Make all non code cache addresses to be unreachable by " \ 717115395Srwatson "forcing use of 64bit literal fixups") \ 718105696Srwatson \ 719115395Srwatson notproduct(bool, StressDerivedPointers, false, \ 720115395Srwatson "Force scavenge when a derived pointer is detected on stack " \ 721115395Srwatson "after rtm call") \ 722105696Srwatson \ 723115395Srwatson develop(bool, TraceDerivedPointers, false, \ 724105696Srwatson "Trace traversal of derived pointers on stack") \ 725105696Srwatson \ 726115395Srwatson notproduct(bool, TraceCodeBlobStacks, false, \ 727101099Srwatson "Trace stack-walk of codeblobs") \ 728105696Srwatson \ 729132232Srwatson product(bool, PrintJNIResolving, false, \ 730132232Srwatson "Used to implement -v:jni") \ 731105696Srwatson \ 732105696Srwatson notproduct(bool, PrintRewrites, false, \ 733132232Srwatson "Print methods that are being rewritten") \ 734105696Srwatson \ 735105696Srwatson product(bool, UseInlineCaches, true, \ 736105696Srwatson "Use Inline Caches for virtual calls ") \ 737105696Srwatson \ 
738105696Srwatson diagnostic(bool, InlineArrayCopy, true, \ 739105696Srwatson "Inline arraycopy native that is known to be part of " \ 740105696Srwatson "base library DLL") \ 741105696Srwatson \ 742105696Srwatson diagnostic(bool, InlineObjectHash, true, \ 743105696Srwatson "Inline Object::hashCode() native that is known to be part " \ 744105696Srwatson "of base library DLL") \ 745105696Srwatson \ 746105696Srwatson diagnostic(bool, InlineNatives, true, \ 747105696Srwatson "Inline natives that are known to be part of base library DLL") \ 748101099Srwatson \ 749101099Srwatson diagnostic(bool, InlineMathNatives, true, \ 750101099Srwatson "Inline SinD, CosD, etc.") \ 751101099Srwatson \ 752105696Srwatson diagnostic(bool, InlineClassNatives, true, \ 753105696Srwatson "Inline Class.isInstance, etc") \ 754101099Srwatson \ 755105696Srwatson diagnostic(bool, InlineThreadNatives, true, \ 756105696Srwatson "Inline Thread.currentThread, etc") \ 757105696Srwatson \ 758105696Srwatson diagnostic(bool, InlineUnsafeOps, true, \ 759105696Srwatson "Inline memory ops (native methods) from Unsafe") \ 760105696Srwatson \ 761105696Srwatson product(bool, CriticalJNINatives, true, \ 762105696Srwatson "Check for critical JNI entry points") \ 763105696Srwatson \ 764105696Srwatson notproduct(bool, StressCriticalJNINatives, false, \ 765105696Srwatson "Exercise register saving code in critical natives") \ 766105696Srwatson \ 767105696Srwatson diagnostic(bool, UseAESIntrinsics, false, \ 768105696Srwatson "Use intrinsics for AES versions of crypto") \ 769105696Srwatson \ 770105696Srwatson diagnostic(bool, UseAESCTRIntrinsics, false, \ 771105696Srwatson "Use intrinsics for the paralleled version of AES/CTR crypto") \ 772105696Srwatson \ 773105696Srwatson diagnostic(bool, UseSHA1Intrinsics, false, \ 774101099Srwatson "Use intrinsics for SHA-1 crypto hash function. 
" \ 775101099Srwatson "Requires that UseSHA is enabled.") \ 776101099Srwatson \ 777105696Srwatson diagnostic(bool, UseSHA256Intrinsics, false, \ 778105696Srwatson "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \ 779105696Srwatson "Requires that UseSHA is enabled.") \ 780105696Srwatson \ 781105696Srwatson diagnostic(bool, UseSHA512Intrinsics, false, \ 782105696Srwatson "Use intrinsics for SHA-384 and SHA-512 crypto hash functions. " \ 783105696Srwatson "Requires that UseSHA is enabled.") \ 784101099Srwatson \ 785101099Srwatson diagnostic(bool, UseCRC32Intrinsics, false, \ 786101099Srwatson "use intrinsics for java.util.zip.CRC32") \ 787101099Srwatson \ 788101099Srwatson diagnostic(bool, UseCRC32CIntrinsics, false, \ 789147982Srwatson "use intrinsics for java.util.zip.CRC32C") \ 790147982Srwatson \ 791101099Srwatson diagnostic(bool, UseAdler32Intrinsics, false, \ 792101099Srwatson "use intrinsics for java.util.zip.Adler32") \ 793101099Srwatson \ 794101099Srwatson diagnostic(bool, UseVectorizedMismatchIntrinsic, false, \ 795101099Srwatson "Enables intrinsification of ArraysSupport.vectorizedMismatch()") \ 796101099Srwatson \ 797101099Srwatson diagnostic(ccstrlist, DisableIntrinsic, "", \ 798101099Srwatson "do not expand intrinsics whose (internal) names appear here") \ 799101099Srwatson \ 800101099Srwatson develop(bool, TraceCallFixup, false, \ 801105606Srwatson "Trace all call fixups") \ 802105606Srwatson \ 803105606Srwatson develop(bool, DeoptimizeALot, false, \ 804105606Srwatson "Deoptimize at every exit from the runtime system") \ 805101099Srwatson \ 806101099Srwatson notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ 807132232Srwatson "A comma separated list of bcis to deoptimize at") \ 808101099Srwatson \ 809101099Srwatson product(bool, DeoptimizeRandom, false, \ 810101099Srwatson "Deoptimize random frames on random exit from the runtime system")\ 811107698Srwatson \ 812107698Srwatson notproduct(bool, ZombieALot, false, \ 813101099Srwatson "Create 
zombies (non-entrant) at exit from the runtime system") \ 814101099Srwatson \ 815101099Srwatson product(bool, UnlinkSymbolsALot, false, \ 816101099Srwatson "Unlink unreferenced symbols from the symbol table at safepoints")\ 817132232Srwatson \ 818101099Srwatson notproduct(bool, WalkStackALot, false, \ 819101099Srwatson "Trace stack (no print) at every exit from the runtime system") \ 820101099Srwatson \ 821107698Srwatson product(bool, Debugging, false, \ 822107698Srwatson "Set when executing debug methods in debug.cpp " \ 823122563Srwatson "(to prevent triggering assertions)") \ 824104535Srwatson \ 825104535Srwatson notproduct(bool, StrictSafepointChecks, trueInDebug, \ 826104535Srwatson "Enable strict checks that safepoints cannot happen for threads " \ 827122524Srwatson "that use NoSafepointVerifier") \ 828104535Srwatson \ 829104535Srwatson notproduct(bool, VerifyLastFrame, false, \ 830132232Srwatson "Verify oops on last frame on entry to VM") \ 831104535Srwatson \ 832104535Srwatson product(bool, FailOverToOldVerifier, true, \ 833104535Srwatson "Fail over to old verifier when split verifier fails") \ 834101099Srwatson \ 835101099Srwatson develop(bool, ShowSafepointMsgs, false, \ 836101099Srwatson "Show message about safepoint synchronization") \ 837101099Srwatson \ 838101099Srwatson product(bool, SafepointTimeout, false, \ 839122524Srwatson "Time out and warn or fail after SafepointTimeoutDelay " \ 840101099Srwatson "milliseconds if failed to reach safepoint") \ 841132232Srwatson \ 842101099Srwatson develop(bool, DieOnSafepointTimeout, false, \ 843132232Srwatson "Die upon failure to reach safepoint (see SafepointTimeout)") \ 844101099Srwatson \ 845101099Srwatson /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \ 846101099Srwatson /* typically, at most a few retries are needed */ \ 847101099Srwatson product(intx, SuspendRetryCount, 50, \ 848101099Srwatson "Maximum retry count for an external suspend request") \ 849101099Srwatson range(0, 
max_intx) \ 850101099Srwatson \ 851101099Srwatson product(intx, SuspendRetryDelay, 5, \ 852101099Srwatson "Milliseconds to delay per retry (* current_retry_count)") \ 853101099Srwatson range(0, max_intx) \ 854101099Srwatson \ 855105656Srwatson product(bool, AssertOnSuspendWaitFailure, false, \ 856101099Srwatson "Assert/Guarantee on external suspend wait failure") \ 857101099Srwatson \ 858101099Srwatson product(bool, TraceSuspendWaitFailures, false, \ 859107698Srwatson "Trace external suspend wait failures") \ 860107698Srwatson \ 861107698Srwatson product(bool, MaxFDLimit, true, \ 862101099Srwatson "Bump the number of file descriptors to maximum in Solaris") \ 863101099Srwatson \ 864101099Srwatson diagnostic(bool, LogEvents, true, \ 865101099Srwatson "Enable the various ring buffer event logs") \ 866101099Srwatson \ 867101099Srwatson diagnostic(uintx, LogEventsBufferEntries, 10, \ 868105656Srwatson "Number of ring buffer event logs") \ 869101099Srwatson range(1, NOT_LP64(1*K) LP64_ONLY(1*M)) \ 870101099Srwatson \ 871101099Srwatson product(bool, BytecodeVerificationRemote, true, \ 872105988Srwatson "Enable the Java bytecode verifier for remote classes") \ 873105988Srwatson \ 874105988Srwatson product(bool, BytecodeVerificationLocal, false, \ 875101099Srwatson "Enable the Java bytecode verifier for local classes") \ 876101099Srwatson \ 877101099Srwatson develop(bool, ForceFloatExceptions, trueInDebug, \ 878105988Srwatson "Force exceptions on FP stack under/overflow") \ 879105988Srwatson \ 880101099Srwatson develop(bool, VerifyStackAtCalls, false, \ 881132232Srwatson "Verify that the stack pointer is unchanged after calls") \ 882101099Srwatson \ 883101099Srwatson develop(bool, TraceJavaAssertions, false, \ 884101099Srwatson "Trace java language assertions") \ 885105988Srwatson \ 886105988Srwatson notproduct(bool, CheckAssertionStatusDirectives, false, \ 887101099Srwatson "Temporary - see javaClasses.cpp") \ 888105988Srwatson \ 889106354Smux notproduct(bool, 
PrintMallocFree, false, \ 890101099Srwatson "Trace calls to C heap malloc/free allocation") \ 891105988Srwatson \ 892105988Srwatson notproduct(bool, VerifyCodeCache, false, \ 893101099Srwatson "Verify code cache on memory allocation/deallocation") \ 894105988Srwatson \ 895105988Srwatson develop(bool, UseMallocOnly, false, \ 896105988Srwatson "Use only malloc/free for allocation (no resource area/arena)") \ 897105988Srwatson \ 898105988Srwatson develop(bool, PrintMalloc, false, \ 899105988Srwatson "Print all malloc/free calls") \ 900105988Srwatson \ 901132232Srwatson develop(bool, PrintMallocStatistics, false, \ 902105988Srwatson "Print malloc/free statistics") \ 903105988Srwatson \ 904101099Srwatson develop(bool, ZapResourceArea, trueInDebug, \ 905101099Srwatson "Zap freed resource/arena space with 0xABABABAB") \ 906105988Srwatson \ 907105988Srwatson notproduct(bool, ZapVMHandleArea, trueInDebug, \ 908105988Srwatson "Zap freed VM handle space with 0xBCBCBCBC") \ 909105988Srwatson \ 910105988Srwatson develop(bool, ZapJNIHandleArea, trueInDebug, \ 911105988Srwatson "Zap freed JNI handle space with 0xFEFEFEFE") \ 912105988Srwatson \ 913105988Srwatson notproduct(bool, ZapStackSegments, trueInDebug, \ 914105988Srwatson "Zap allocated/freed stack segments with 0xFADFADED") \ 915132232Srwatson \ 916132232Srwatson develop(bool, ZapUnusedHeapArea, trueInDebug, \ 917105988Srwatson "Zap unused heap space with 0xBAADBABE") \ 918105988Srwatson \ 919101099Srwatson develop(bool, CheckZapUnusedHeapArea, false, \ 920132232Srwatson "Check zapping of unused heap space") \ 921101099Srwatson \ 922101099Srwatson develop(bool, ZapFillerObjects, trueInDebug, \ 923101099Srwatson "Zap filler objects with 0xDEAFBABE") \ 924101099Srwatson \ 925105988Srwatson develop(bool, PrintVMMessages, true, \ 926105988Srwatson "Print VM messages on console") \ 927101099Srwatson \ 928101099Srwatson notproduct(uintx, ErrorHandlerTest, 0, \ 929101099Srwatson "If > 0, provokes an error after VM 
initialization; the value " \ 930101099Srwatson "determines which error to provoke. See test_error_handler() " \ 931105988Srwatson "in vmError.cpp.") \ 932101099Srwatson \ 933132232Srwatson notproduct(uintx, TestCrashInErrorHandler, 0, \ 934101099Srwatson "If > 0, provokes an error inside VM error handler (a secondary " \ 935101099Srwatson "crash). see test_error_handler() in vmError.cpp") \ 936105988Srwatson \ 937105988Srwatson notproduct(bool, TestSafeFetchInErrorHandler, false, \ 938105988Srwatson "If true, tests SafeFetch inside error handler.") \ 939105988Srwatson \ 940105988Srwatson notproduct(bool, TestUnresponsiveErrorHandler, false, \ 941105988Srwatson "If true, simulates an unresponsive error handler.") \ 942105988Srwatson \ 943105988Srwatson develop(bool, Verbose, false, \ 944105988Srwatson "Print additional debugging information from other modes") \ 945105988Srwatson \ 946105988Srwatson develop(bool, PrintMiscellaneous, false, \ 947105988Srwatson "Print uncategorized debugging information (requires +Verbose)") \ 948122524Srwatson \ 949105988Srwatson develop(bool, WizardMode, false, \ 950132232Srwatson "Print much more debugging information") \ 951105988Srwatson \ 952105988Srwatson product(bool, ShowMessageBoxOnError, false, \ 953105988Srwatson "Keep process alive on VM fatal error") \ 954105988Srwatson \ 955132232Srwatson product(bool, CreateCoredumpOnCrash, true, \ 956105988Srwatson "Create core/mini dump on VM fatal error") \ 957105988Srwatson \ 958105988Srwatson product(uint64_t, ErrorLogTimeout, 2 * 60, \ 959105988Srwatson "Timeout, in seconds, to limit the time spent on writing an " \ 960105988Srwatson "error log in case of a crash.") \ 961105988Srwatson range(0, (uint64_t)max_jlong/1000) \ 962105988Srwatson \ 963105988Srwatson product_pd(bool, UseOSErrorReporting, \ 964105988Srwatson "Let VM fatal error propagate to the OS (ie. 
WER on Windows)") \ 965105988Srwatson \ 966105988Srwatson product(bool, SuppressFatalErrorMessage, false, \ 967105988Srwatson "Report NO fatal error message (avoid deadlock)") \ 968105988Srwatson \ 969105988Srwatson product(ccstrlist, OnError, "", \ 970105988Srwatson "Run user-defined commands on fatal error; see VMError.cpp " \ 971132232Srwatson "for examples") \ 972105988Srwatson \ 973105988Srwatson product(ccstrlist, OnOutOfMemoryError, "", \ 974132232Srwatson "Run user-defined commands on first java.lang.OutOfMemoryError") \ 975105988Srwatson \ 976105988Srwatson manageable(bool, HeapDumpBeforeFullGC, false, \ 977105988Srwatson "Dump heap to file before any major stop-the-world GC") \ 978105988Srwatson \ 979105988Srwatson manageable(bool, HeapDumpAfterFullGC, false, \ 980105988Srwatson "Dump heap to file after any major stop-the-world GC") \ 981101099Srwatson \ 982101099Srwatson manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 983101099Srwatson "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 984101099Srwatson \ 985122875Srwatson manageable(ccstr, HeapDumpPath, NULL, \ 986122875Srwatson "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ 987122875Srwatson "directory) of the dump file (defaults to java_pid<pid>.hprof " \ 988122875Srwatson "in the working directory)") \ 989122875Srwatson \ 990122875Srwatson develop(bool, BreakAtWarning, false, \ 991122875Srwatson "Execute breakpoint upon encountering VM warning") \ 992122875Srwatson \ 993132232Srwatson develop(bool, UseFakeTimers, false, \ 994122875Srwatson "Tell whether the VM should use system time or a fake timer") \ 995122875Srwatson \ 996122875Srwatson product(ccstr, NativeMemoryTracking, "off", \ 997101099Srwatson "Native memory tracking options") \ 998101099Srwatson \ 999101099Srwatson diagnostic(bool, PrintNMTStatistics, false, \ 1000101099Srwatson "Print native memory tracking summary data if it is on") \ 1001101099Srwatson \ 1002101099Srwatson diagnostic(bool, 
LogCompilation, false, \ 1003101099Srwatson "Log compilation activity in detail to LogFile") \ 1004101099Srwatson \ 1005132232Srwatson product(bool, PrintCompilation, false, \ 1006101099Srwatson "Print compilations") \ 1007101099Srwatson \ 1008101099Srwatson diagnostic(bool, TraceNMethodInstalls, false, \ 1009101099Srwatson "Trace nmethod installation") \ 1010101099Srwatson \ 1011101099Srwatson diagnostic(intx, ScavengeRootsInCode, 2, \ 1012101099Srwatson "0: do not allow scavengable oops in the code cache; " \ 1013101099Srwatson "1: allow scavenging from the code cache; " \ 1014122524Srwatson "2: emit as many constants as the compiler can see") \ 1015101099Srwatson range(0, 2) \ 1016101099Srwatson \ 1017132232Srwatson product(bool, AlwaysRestoreFPU, false, \ 1018101099Srwatson "Restore the FPU control word after every JNI call (expensive)") \ 1019101099Srwatson \ 1020101099Srwatson diagnostic(bool, PrintCompilation2, false, \ 1021125293Srwatson "Print additional statistics per compilation") \ 1022101099Srwatson \ 1023101099Srwatson diagnostic(bool, PrintAdapterHandlers, false, \ 1024101099Srwatson "Print code generated for i2c/c2i adapters") \ 1025101099Srwatson \ 1026122524Srwatson diagnostic(bool, VerifyAdapterCalls, trueInDebug, \ 1027101099Srwatson "Verify that i2c/c2i adapters are called properly") \ 1028101099Srwatson \ 1029132232Srwatson develop(bool, VerifyAdapterSharing, false, \ 1030101099Srwatson "Verify that the code for shared adapters is the equivalent") \ 1031101099Srwatson \ 1032101099Srwatson diagnostic(bool, PrintAssembly, false, \ 1033145855Srwatson "Print assembly code (using external disassembler.so)") \ 1034145855Srwatson \ 1035145855Srwatson diagnostic(ccstr, PrintAssemblyOptions, NULL, \ 1036145855Srwatson "Print options string passed to disassembler.so") \ 1037145855Srwatson \ 1038145855Srwatson notproduct(bool, PrintNMethodStatistics, false, \ 1039145855Srwatson "Print a summary statistic for the generated nmethods") \ 1040145855Srwatson 
\ 1041145855Srwatson diagnostic(bool, PrintNMethods, false, \ 1042145855Srwatson "Print assembly code for nmethods when generated") \ 1043145855Srwatson \ 1044145855Srwatson diagnostic(bool, PrintNativeNMethods, false, \ 1045101099Srwatson "Print assembly code for native nmethods when generated") \ 1046101099Srwatson \ 1047101099Srwatson develop(bool, PrintDebugInfo, false, \ 1048101099Srwatson "Print debug information for all nmethods when generated") \ 1049101099Srwatson \ 1050101099Srwatson develop(bool, PrintRelocations, false, \ 1051101099Srwatson "Print relocation information for all nmethods when generated") \ 1052101099Srwatson \ 1053101099Srwatson develop(bool, PrintDependencies, false, \ 1054132232Srwatson "Print dependency information for all nmethods when generated") \ 1055101099Srwatson \ 1056101099Srwatson develop(bool, PrintExceptionHandlers, false, \ 1057101099Srwatson "Print exception handler tables for all nmethods when generated") \ 1058101099Srwatson \ 1059101099Srwatson develop(bool, StressCompiledExceptionHandlers, false, \ 1060101099Srwatson "Exercise compiled exception handlers") \ 1061101099Srwatson \ 1062101099Srwatson develop(bool, InterceptOSException, false, \ 1063101099Srwatson "Start debugger when an implicit OS (e.g. 
NULL) " \ 1064101099Srwatson "exception happens") \ 1065101099Srwatson \ 1066105656Srwatson product(bool, PrintCodeCache, false, \ 1067101099Srwatson "Print the code cache memory usage when exiting") \ 1068101099Srwatson \ 1069101099Srwatson develop(bool, PrintCodeCache2, false, \ 1070125293Srwatson "Print detailed usage information on the code cache when exiting")\ 1071101099Srwatson \ 1072101099Srwatson product(bool, PrintCodeCacheOnCompilation, false, \ 1073101099Srwatson "Print the code cache memory usage each time a method is " \ 1074101099Srwatson "compiled") \ 1075101099Srwatson \ 1076101099Srwatson diagnostic(bool, PrintStubCode, false, \ 1077101099Srwatson "Print generated stub code") \ 1078105656Srwatson \ 1079101099Srwatson product(bool, StackTraceInThrowable, true, \ 1080101099Srwatson "Collect backtrace in throwable when exception happens") \ 1081101099Srwatson \ 1082101099Srwatson product(bool, OmitStackTraceInFastThrow, true, \ 1083101099Srwatson "Omit backtraces for some 'hot' exceptions in optimized code") \ 1084101099Srwatson \ 1085101099Srwatson product(bool, ProfilerPrintByteCodeStatistics, false, \ 1086101099Srwatson "Print bytecode statistics when dumping profiler output") \ 1087101099Srwatson \ 1088101099Srwatson product(bool, ProfilerRecordPC, false, \ 1089101099Srwatson "Collect ticks for each 16 byte interval of compiled code") \ 1090132232Srwatson \ 1091101099Srwatson product(bool, ProfileVM, false, \ 1092101099Srwatson "Profile ticks that fall within VM (either in the VM Thread " \ 1093101099Srwatson "or VM code called through stubs)") \ 1094140628Srwatson \ 1095140628Srwatson product(bool, ProfileIntervals, false, \ 1096140628Srwatson "Print profiles for each interval (see ProfileIntervalsTicks)") \ 1097140628Srwatson \ 1098140628Srwatson notproduct(bool, ProfilerCheckIntervals, false, \ 1099140628Srwatson "Collect and print information on spacing of profiler ticks") \ 1100140628Srwatson \ 1101140628Srwatson product(bool, PrintWarnings, 
true, \ 1102140628Srwatson "Print JVM warnings to output stream") \ 1103140628Srwatson \ 1104140628Srwatson notproduct(uintx, WarnOnStalledSpinLock, 0, \ 1105140628Srwatson "Print warnings for stalled SpinLocks") \ 1106140628Srwatson \ 1107140628Srwatson product(bool, RegisterFinalizersAtInit, true, \ 1108140628Srwatson "Register finalizable objects at end of Object.<init> or " \ 1109140628Srwatson "after allocation") \ 1110140628Srwatson \ 1111140628Srwatson develop(bool, RegisterReferences, true, \ 1112140628Srwatson "Tell whether the VM should register soft/weak/final/phantom " \ 1113140628Srwatson "references") \ 1114140628Srwatson \ 1115140628Srwatson develop(bool, IgnoreRewrites, false, \ 1116140628Srwatson "Suppress rewrites of bytecodes in the oopmap generator. " \ 1117140628Srwatson "This is unsafe!") \ 1118140628Srwatson \ 1119140628Srwatson develop(bool, PrintCodeCacheExtension, false, \ 1120140628Srwatson "Print extension of code cache") \ 1121140628Srwatson \ 1122140628Srwatson develop(bool, UsePrivilegedStack, true, \ 1123147091Srwatson "Enable the security JVM functions") \ 1124140628Srwatson \ 1125140628Srwatson develop(bool, ProtectionDomainVerification, true, \ 1126140628Srwatson "Verify protection domain before resolution in system dictionary")\ 1127140628Srwatson \ 1128140628Srwatson product(bool, ClassUnloading, true, \ 1129140628Srwatson "Do unloading of classes") \ 1130140628Srwatson \ 1131140628Srwatson product(bool, ClassUnloadingWithConcurrentMark, true, \ 1132140628Srwatson "Do unloading of classes with a concurrent marking cycle") \ 1133140628Srwatson \ 1134140628Srwatson develop(bool, DisableStartThread, false, \ 1135140628Srwatson "Disable starting of additional Java threads " \ 1136140628Srwatson "(for debugging only)") \ 1137140628Srwatson \ 1138140628Srwatson develop(bool, MemProfiling, false, \ 1139140628Srwatson "Write memory usage profiling to log file") \ 1140140628Srwatson \ 1141140628Srwatson notproduct(bool, 
PrintSystemDictionaryAtExit, false, \ 1142140628Srwatson "Print the system dictionary at exit") \ 1143140628Srwatson \ 1144140628Srwatson experimental(intx, PredictedLoadedClassCount, 0, \ 1145140628Srwatson "Experimental: Tune loaded class cache starting size") \ 1146140628Srwatson \ 1147101099Srwatson diagnostic(bool, UnsyncloadClass, false, \ 1148101099Srwatson "Unstable: VM calls loadClass unsynchronized. Custom " \ 1149101099Srwatson "class loader must call VM synchronized for findClass " \ 1150101099Srwatson "and defineClass.") \ 1151101099Srwatson \ 1152101099Srwatson product(bool, AlwaysLockClassLoader, false, \ 1153101099Srwatson "Require the VM to acquire the class loader lock before calling " \ 1154101099Srwatson "loadClass() even for class loaders registering " \ 1155101099Srwatson "as parallel capable") \ 1156101099Srwatson \ 1157101099Srwatson product(bool, AllowParallelDefineClass, false, \ 1158101099Srwatson "Allow parallel defineClass requests for class loaders " \ 1159132232Srwatson "registering as parallel capable") \ 1160101099Srwatson \ 1161101099Srwatson product(bool, MustCallLoadClassInternal, false, \ 1162101099Srwatson "Call loadClassInternal() rather than loadClass()") \ 1163101099Srwatson \ 1164101099Srwatson product_pd(bool, DontYieldALot, \ 1165101099Srwatson "Throw away obvious excess yield calls") \ 1166101099Srwatson \ 1167101099Srwatson develop(bool, UseDetachedThreads, true, \ 1168122524Srwatson "Use detached threads that are recycled upon termination " \ 1169101099Srwatson "(for Solaris only)") \ 1170101099Srwatson \ 1171132232Srwatson product(bool, UseLWPSynchronization, true, \ 1172101099Srwatson "Use LWP-based instead of libthread-based synchronization " \ 1173101099Srwatson "(SPARC only)") \ 1174101099Srwatson \ 1175101099Srwatson experimental(ccstr, SyncKnobs, NULL, \ 1176101099Srwatson "(Unstable) Various monitor synchronization tunables") \ 1177121816Sbrooks \ 1178101099Srwatson experimental(intx, EmitSync, 0, \ 
1179101099Srwatson "(Unsafe, Unstable) " \ 1180110350Srwatson "Control emission of inline sync fast-path code") \ 1181101099Srwatson \ 1182101099Srwatson product(intx, MonitorBound, 0, "Bound Monitor population") \ 1183101099Srwatson range(0, max_jint) \ 1184153927Scsjp \ 1185110350Srwatson product(bool, MonitorInUseLists, true, "Track Monitors for Deflation") \ 1186101099Srwatson \ 1187101099Srwatson experimental(intx, MonitorUsedDeflationThreshold, 90, \ 1188101099Srwatson "Percentage of used monitors before triggering cleanup " \ 1189101099Srwatson "safepoint which deflates monitors (0 is off). " \ 1190110350Srwatson "The check is performed on GuaranteedSafepointInterval.") \ 1191101099Srwatson range(0, 100) \ 1192101099Srwatson \ 1193101099Srwatson experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) " \ 1194110350Srwatson "Experimental Sync flags") \ 1195101099Srwatson \ 1196101099Srwatson experimental(intx, SyncVerbose, 0, "(Unstable)") \ 1197101099Srwatson \ 1198101099Srwatson diagnostic(bool, InlineNotify, true, "intrinsify subset of notify") \ 1199101099Srwatson \ 1200106089Srwatson experimental(intx, hashCode, 5, \ 1201101099Srwatson "(Unstable) select hashCode generation algorithm") \ 1202101099Srwatson \ 1203101099Srwatson product(bool, FilterSpuriousWakeups, true, \ 1204101099Srwatson "When true prevents OS-level spurious, or premature, wakeups " \ 1205101099Srwatson "from Object.wait (Ignored for Windows)") \ 1206101099Srwatson \ 1207101099Srwatson experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \ 1208101099Srwatson \ 1209101099Srwatson experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \ 1210101099Srwatson \ 1211121816Sbrooks experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ 1212110350Srwatson \ 1213101099Srwatson develop(bool, UsePthreads, false, \ 1214101099Srwatson "Use pthread-based instead of libthread-based synchronization " \ 1215106089Srwatson "(SPARC only)") \ 1216106089Srwatson \ 1217106089Srwatson product(bool, 
ReduceSignalUsage, false, \ 1218106089Srwatson "Reduce the use of OS signals in Java and/or the VM") \ 1219106089Srwatson \ 1220101099Srwatson develop_pd(bool, ShareVtableStubs, \ 1221101099Srwatson "Share vtable stubs (smaller code but worse branch prediction") \ 1222101099Srwatson \ 1223101099Srwatson develop(bool, LoadLineNumberTables, true, \ 1224101099Srwatson "Tell whether the class file parser loads line number tables") \ 1225101099Srwatson \ 1226101099Srwatson develop(bool, LoadLocalVariableTables, true, \ 1227132232Srwatson "Tell whether the class file parser loads local variable tables") \ 1228110350Srwatson \ 1229101099Srwatson develop(bool, LoadLocalVariableTypeTables, true, \ 1230101099Srwatson "Tell whether the class file parser loads local variable type" \ 1231101099Srwatson "tables") \ 1232101099Srwatson \ 1233101099Srwatson product(bool, AllowUserSignalHandlers, false, \ 1234101099Srwatson "Do not complain if the application installs signal handlers " \ 1235101099Srwatson "(Solaris & Linux only)") \ 1236101099Srwatson \ 1237101099Srwatson product(bool, UseSignalChaining, true, \ 1238101099Srwatson "Use signal-chaining to invoke signal handlers installed " \ 1239101099Srwatson "by the application (Solaris & Linux only)") \ 1240132232Srwatson \ 1241101099Srwatson product(bool, AllowJNIEnvProxy, false, \ 1242101099Srwatson "Allow JNIEnv proxies for jdbx") \ 1243101099Srwatson \ 1244101099Srwatson product(bool, RestoreMXCSROnJNICalls, false, \ 1245101099Srwatson "Restore MXCSR when returning from JNI calls") \ 1246101099Srwatson \ 1247101099Srwatson product(bool, CheckJNICalls, false, \ 1248101099Srwatson "Verify all arguments to JNI calls") \ 1249101099Srwatson \ 1250101099Srwatson product(bool, CheckEndorsedAndExtDirs, false, \ 1251101099Srwatson "Verify the endorsed and extension directories are not used") \ 1252101099Srwatson \ 1253132232Srwatson product(bool, UseFastJNIAccessors, true, \ 1254101099Srwatson "Use optimized versions of 
Get<Primitive>Field") \ 1255101099Srwatson \ 1256101099Srwatson product(intx, MaxJNILocalCapacity, 65536, \ 1257101099Srwatson "Maximum allowable local JNI handle capacity to " \ 1258101099Srwatson "EnsureLocalCapacity() and PushLocalFrame(), " \ 1259101099Srwatson "where <= 0 is unlimited, default: 65536") \ 1260101099Srwatson range(min_intx, max_intx) \ 1261101099Srwatson \ 1262101099Srwatson product(bool, EagerXrunInit, false, \ 1263101099Srwatson "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 1264101099Srwatson "but not all -Xrun libraries may support the state of the VM " \ 1265132232Srwatson "at this time") \ 1266101099Srwatson \ 1267101099Srwatson product(bool, PreserveAllAnnotations, false, \ 1268101099Srwatson "Preserve RuntimeInvisibleAnnotations as well " \ 1269123607Srwatson "as RuntimeVisibleAnnotations") \ 1270123607Srwatson \ 1271123607Srwatson develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 1272123607Srwatson "Number of OutOfMemoryErrors preallocated with backtrace") \ 1273123607Srwatson \ 1274123607Srwatson product(bool, UseXMMForArrayCopy, false, \ 1275123607Srwatson "Use SSE2 MOVQ instruction for Arraycopy") \ 1276123607Srwatson \ 1277132232Srwatson product(intx, FieldsAllocationStyle, 1, \ 1278123607Srwatson "0 - type based with oops first, " \ 1279123607Srwatson "1 - with oops last, " \ 1280123607Srwatson "2 - oops in super and sub classes are together") \ 1281101099Srwatson range(0, 2) \ 1282101099Srwatson \ 1283101099Srwatson product(bool, CompactFields, true, \ 1284101099Srwatson "Allocate nonstatic fields in gaps between previous fields") \ 1285101099Srwatson \ 1286101099Srwatson notproduct(bool, PrintFieldLayout, false, \ 1287101099Srwatson "Print field layout for each class") \ 1288132232Srwatson \ 1289101099Srwatson /* Need to limit the extent of the padding to reasonable size. 
*/\ 1290101099Srwatson /* 8K is well beyond the reasonable HW cache line size, even with */\ 1291101099Srwatson /* aggressive prefetching, while still leaving the room for segregating */\ 1292101099Srwatson /* among the distinct pages. */\ 1293101099Srwatson product(intx, ContendedPaddingWidth, 128, \ 1294101099Srwatson "How many bytes to pad the fields/classes marked @Contended with")\ 1295101099Srwatson range(0, 8192) \ 1296101099Srwatson constraint(ContendedPaddingWidthConstraintFunc,AfterErgo) \ 1297101099Srwatson \ 1298101099Srwatson product(bool, EnableContended, true, \ 1299101099Srwatson "Enable @Contended annotation support") \ 1300132232Srwatson \ 1301101099Srwatson product(bool, RestrictContended, true, \ 1302101099Srwatson "Restrict @Contended to trusted classes") \ 1303101099Srwatson \ 1304101099Srwatson product(bool, UseBiasedLocking, true, \ 1305101099Srwatson "Enable biased locking in JVM") \ 1306101099Srwatson \ 1307101099Srwatson product(intx, BiasedLockingStartupDelay, 0, \ 1308101099Srwatson "Number of milliseconds to wait before enabling biased locking") \ 1309101099Srwatson range(0, (intx)(max_jint-(max_jint%PeriodicTask::interval_gran))) \ 1310101099Srwatson constraint(BiasedLockingStartupDelayFunc,AfterErgo) \ 1311101099Srwatson \ 1312132232Srwatson diagnostic(bool, PrintBiasedLockingStatistics, false, \ 1313101099Srwatson "Print statistics of biased locking in JVM") \ 1314101099Srwatson \ 1315101099Srwatson product(intx, BiasedLockingBulkRebiasThreshold, 20, \ 1316101099Srwatson "Threshold of number of revocations per type to try to " \ 1317101099Srwatson "rebias all objects in the heap of that type") \ 1318101099Srwatson range(0, max_intx) \ 1319101099Srwatson constraint(BiasedLockingBulkRebiasThresholdFunc,AfterErgo) \ 1320101099Srwatson \ 1321101099Srwatson product(intx, BiasedLockingBulkRevokeThreshold, 40, \ 1322101099Srwatson "Threshold of number of revocations per type to permanently " \ 1323101099Srwatson "revoke biases of all 
objects in the heap of that type") \ 1324101099Srwatson range(0, max_intx) \ 1325132232Srwatson constraint(BiasedLockingBulkRevokeThresholdFunc,AfterErgo) \ 1326101099Srwatson \ 1327101099Srwatson product(intx, BiasedLockingDecayTime, 25000, \ 1328101099Srwatson "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ 1329101099Srwatson "type after previous bulk rebias") \ 1330101099Srwatson range(500, max_intx) \ 1331101099Srwatson constraint(BiasedLockingDecayTimeFunc,AfterErgo) \ 1332101099Srwatson \ 1333101099Srwatson product(bool, ExitOnOutOfMemoryError, false, \ 1334101099Srwatson "JVM exits on the first occurrence of an out-of-memory error") \ 1335101099Srwatson \ 1336101099Srwatson product(bool, CrashOnOutOfMemoryError, false, \ 1337132232Srwatson "JVM aborts, producing an error log and core/mini dump, on the " \ 1338101099Srwatson "first occurrence of an out-of-memory error") \ 1339101099Srwatson \ 1340101099Srwatson /* tracing */ \ 1341101099Srwatson \ 1342101099Srwatson develop(bool, StressRewriter, false, \ 1343101099Srwatson "Stress linktime bytecode rewriting") \ 1344101099Srwatson \ 1345101099Srwatson product(ccstr, TraceJVMTI, NULL, \ 1346101099Srwatson "Trace flags for JVMTI functions and events") \ 1347101099Srwatson \ 1348101099Srwatson /* This option can change an EMCP method into an obsolete method. */ \ 1349132232Srwatson /* This can affect tests that except specific methods to be EMCP. */ \ 1350101099Srwatson /* This option should be used with caution. 
*/ \ 1351101099Srwatson product(bool, StressLdcRewrite, false, \ 1352101099Srwatson "Force ldc -> ldc_w rewrite during RedefineClasses") \ 1353101099Srwatson \ 1354101099Srwatson /* change to false by default sometime after Mustang */ \ 1355101099Srwatson product(bool, VerifyMergedCPBytecodes, true, \ 1356101099Srwatson "Verify bytecodes after RedefineClasses constant pool merging") \ 1357101099Srwatson \ 1358101099Srwatson develop(bool, TraceBytecodes, false, \ 1359101099Srwatson "Trace bytecode execution") \ 1360101099Srwatson \ 1361105656Srwatson develop(bool, TraceICs, false, \ 1362101099Srwatson "Trace inline cache changes") \ 1363101099Srwatson \ 1364101099Srwatson notproduct(bool, TraceInvocationCounterOverflow, false, \ 1365101099Srwatson "Trace method invocation counter overflow") \ 1366101099Srwatson \ 1367101099Srwatson develop(bool, TraceInlineCacheClearing, false, \ 1368101099Srwatson "Trace clearing of inline caches in nmethods") \ 1369101099Srwatson \ 1370101099Srwatson develop(bool, TraceDependencies, false, \ 1371101099Srwatson "Trace dependencies") \ 1372122875Srwatson \ 1373122875Srwatson develop(bool, VerifyDependencies, trueInDebug, \ 1374122875Srwatson "Exercise and verify the compilation dependency mechanism") \ 1375122875Srwatson \ 1376122875Srwatson develop(bool, TraceNewOopMapGeneration, false, \ 1377122875Srwatson "Trace OopMapGeneration") \ 1378122875Srwatson \ 1379122875Srwatson develop(bool, TraceNewOopMapGenerationDetailed, false, \ 1380122875Srwatson "Trace OopMapGeneration: print detailed cell states") \ 1381122875Srwatson \ 1382122875Srwatson develop(bool, TimeOopMap, false, \ 1383122875Srwatson "Time calls to GenerateOopMap::compute_map() in sum") \ 1384101099Srwatson \ 1385101099Srwatson develop(bool, TimeOopMap2, false, \ 1386101099Srwatson "Time calls to GenerateOopMap::compute_map() individually") \ 1387101099Srwatson \ 1388101099Srwatson develop(bool, TraceOopMapRewrites, false, \ 1389101099Srwatson "Trace rewriting of method 
oops during oop map generation") \ 1390101099Srwatson \ 1391101099Srwatson develop(bool, TraceICBuffer, false, \ 1392122524Srwatson "Trace usage of IC buffer") \ 1393101099Srwatson \ 1394132232Srwatson develop(bool, TraceCompiledIC, false, \ 1395105643Srwatson "Trace changes of compiled IC") \ 1396105643Srwatson \ 1397101099Srwatson /* gc */ \ 1398101099Srwatson \ 1399101099Srwatson product(bool, UseSerialGC, false, \ 1400101099Srwatson "Use the Serial garbage collector") \ 1401101099Srwatson \ 1402101099Srwatson product(bool, UseG1GC, false, \ 1403101099Srwatson "Use the Garbage-First garbage collector") \ 1404122524Srwatson \ 1405101099Srwatson product(bool, UseParallelGC, false, \ 1406132232Srwatson "Use the Parallel Scavenge garbage collector") \ 1407105643Srwatson \ 1408105643Srwatson product(bool, UseParallelOldGC, false, \ 1409101099Srwatson "Use the Parallel Old garbage collector") \ 1410101099Srwatson \ 1411101099Srwatson product(uintx, HeapMaximumCompactionInterval, 20, \ 1412101099Srwatson "How often should we maximally compact the heap (not allowing " \ 1413101099Srwatson "any dead space)") \ 1414101099Srwatson range(0, max_uintx) \ 1415101099Srwatson \ 1416101099Srwatson product(uintx, HeapFirstMaximumCompactionCount, 3, \ 1417122524Srwatson "The collection count for the first maximum compaction") \ 1418101099Srwatson range(0, max_uintx) \ 1419105656Srwatson \ 1420101099Srwatson product(bool, UseMaximumCompactionOnSystemGC, true, \ 1421101099Srwatson "Use maximum compaction in the Parallel Old garbage collector " \ 1422101099Srwatson "for a system GC") \ 1423140628Srwatson \ 1424140628Srwatson product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 1425140628Srwatson "The mean used by the parallel compact dead wood " \ 1426140628Srwatson "limiter (a number between 0-100)") \ 1427140628Srwatson range(0, 100) \ 1428140628Srwatson \ 1429140628Srwatson product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 1430140628Srwatson "The standard deviation used by 
the parallel compact dead wood " \ 1431140628Srwatson "limiter (a number between 0-100)") \ 1432140628Srwatson range(0, 100) \ 1433140628Srwatson \ 1434140628Srwatson product(uint, ParallelGCThreads, 0, \ 1435140628Srwatson "Number of parallel threads parallel gc will use") \ 1436140628Srwatson constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \ 1437140628Srwatson \ 1438140628Srwatson diagnostic(bool, UseSemaphoreGCThreadsSynchronization, true, \ 1439140628Srwatson "Use semaphore synchronization for the GC Threads, " \ 1440147091Srwatson "instead of synchronization based on mutexes") \ 1441140628Srwatson \ 1442140628Srwatson product(bool, UseDynamicNumberOfGCThreads, false, \ 1443140628Srwatson "Dynamically choose the number of parallel threads " \ 1444140628Srwatson "parallel gc will use") \ 1445140628Srwatson \ 1446140628Srwatson diagnostic(bool, InjectGCWorkerCreationFailure, false, \ 1447140628Srwatson "Inject thread creation failures for " \ 1448140628Srwatson "UseDynamicNumberOfGCThreads") \ 1449140628Srwatson \ 1450140628Srwatson diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ 1451140628Srwatson "Force dynamic selection of the number of " \ 1452140628Srwatson "parallel threads parallel gc will use to aid debugging") \ 1453101099Srwatson \ 1454101099Srwatson product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \ 1455101099Srwatson "Size of heap (bytes) per GC thread used in calculating the " \ 1456101099Srwatson "number of GC threads") \ 1457101099Srwatson range((size_t)os::vm_page_size(), (size_t)max_uintx) \ 1458101099Srwatson \ 1459101099Srwatson product(uint, ConcGCThreads, 0, \ 1460101099Srwatson "Number of threads concurrent gc will use") \ 1461101099Srwatson constraint(ConcGCThreadsConstraintFunc,AfterErgo) \ 1462101099Srwatson \ 1463101099Srwatson product(uint, GCTaskTimeStampEntries, 200, \ 1464101099Srwatson "Number of time stamp entries per gc worker thread") \ 1465101099Srwatson range(1, max_jint) \ 1466101099Srwatson \ 
1467132232Srwatson product(bool, AlwaysTenure, false, \ 1468101099Srwatson "Always tenure objects in eden (ParallelGC only)") \ 1469101099Srwatson \ 1470101099Srwatson product(bool, NeverTenure, false, \ 1471101099Srwatson "Never tenure objects in eden, may tenure on overflow " \ 1472101099Srwatson "(ParallelGC only)") \ 1473101099Srwatson \ 1474101099Srwatson product(bool, ScavengeBeforeFullGC, true, \ 1475101099Srwatson "Scavenge youngest generation before each full GC.") \ 1476105634Srwatson \ 1477101099Srwatson product(bool, UseConcMarkSweepGC, false, \ 1478122524Srwatson "Use Concurrent Mark-Sweep GC in the old generation") \ 1479101099Srwatson \ 1480101099Srwatson product(bool, ExplicitGCInvokesConcurrent, false, \ 1481101099Srwatson "A System.gc() request invokes a concurrent collection; " \ 1482105634Srwatson "(effective only when using concurrent collectors)") \ 1483132232Srwatson \ 1484101099Srwatson product(bool, GCLockerInvokesConcurrent, false, \ 1485105634Srwatson "The exit of a JNI critical section necessitating a scavenge, " \ 1486105634Srwatson "also kicks off a background concurrent collection") \ 1487105634Srwatson \ 1488101099Srwatson product(uintx, GCLockerEdenExpansionPercent, 5, \ 1489101099Srwatson "How much the GC can expand the eden by while the GC locker " \ 1490105634Srwatson "is active (as a percentage)") \ 1491101099Srwatson range(0, 100) \ 1492105634Srwatson \ 1493105634Srwatson diagnostic(uintx, GCLockerRetryAllocationCount, 2, \ 1494110351Srwatson "Number of times to retry allocations when " \ 1495132232Srwatson "blocked by the GC locker") \ 1496110351Srwatson range(0, max_uintx) \ 1497110351Srwatson \ 1498110351Srwatson product(bool, UseCMSBestFit, true, \ 1499110351Srwatson "Use CMS best fit allocation strategy") \ 1500132232Srwatson \ 1501110351Srwatson product(uintx, ParallelGCBufferWastePct, 10, \ 1502110351Srwatson "Wasted fraction of parallel allocation buffer") \ 1503110351Srwatson range(0, 100) \ 1504132232Srwatson \ 
1505132232Srwatson product(uintx, TargetPLABWastePct, 10, \ 1506105634Srwatson "Target wasted space in last buffer as percent of overall " \ 1507132232Srwatson "allocation") \ 1508132232Srwatson range(1, 100) \ 1509105634Srwatson \ 1510101099Srwatson product(uintx, PLABWeight, 75, \ 1511105634Srwatson "Percentage (0-100) used to weight the current sample when " \ 1512105634Srwatson "computing exponentially decaying average for ResizePLAB") \ 1513105634Srwatson range(0, 100) \ 1514105634Srwatson \ 1515105634Srwatson product(bool, ResizePLAB, true, \ 1516105634Srwatson "Dynamically resize (survivor space) promotion LAB's") \ 1517105634Srwatson \ 1518101099Srwatson product(int, ParGCArrayScanChunk, 50, \ 1519105634Srwatson "Scan a subset of object array and push remainder, if array is " \ 1520105634Srwatson "bigger than this") \ 1521105634Srwatson range(1, max_jint/3) \ 1522105634Srwatson \ 1523105634Srwatson product(bool, ParGCUseLocalOverflow, false, \ 1524105634Srwatson "Instead of a global overflow list, use local overflow stacks") \ 1525106090Srwatson \ 1526105634Srwatson product(bool, ParGCTrimOverflow, true, \ 1527105634Srwatson "Eagerly trim the local overflow lists " \ 1528105634Srwatson "(when ParGCUseLocalOverflow)") \ 1529105634Srwatson \ 1530105634Srwatson notproduct(bool, ParGCWorkQueueOverflowALot, false, \ 1531101099Srwatson "Simulate work queue overflow in ParNew") \ 1532101099Srwatson \ 1533101099Srwatson notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ 1534101099Srwatson "An `interval' counter that determines how frequently " \ 1535101099Srwatson "we simulate overflow; a smaller number increases frequency") \ 1536101099Srwatson \ 1537101099Srwatson product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ 1538101099Srwatson "The desired number of objects to claim from the overflow list") \ 1539101099Srwatson range(0, max_uintx) \ 1540101099Srwatson \ 1541101099Srwatson diagnostic(uintx, ParGCStridesPerThread, 2, \ 1542122524Srwatson "The 
number of strides per worker thread that we divide up the " \ 1543122524Srwatson "card table scanning work into") \ 1544101099Srwatson range(1, max_uintx) \ 1545101099Srwatson constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo) \ 1546132232Srwatson \ 1547101099Srwatson diagnostic(intx, ParGCCardsPerStrideChunk, 256, \ 1548101099Srwatson "The number of cards in each chunk of the parallel chunks used " \ 1549101099Srwatson "during card table scanning") \ 1550101099Srwatson range(1, max_intx) \ 1551101099Srwatson constraint(ParGCCardsPerStrideChunkConstraintFunc,AfterMemoryInit)\ 1552101099Srwatson \ 1553101099Srwatson product(uintx, OldPLABWeight, 50, \ 1554101099Srwatson "Percentage (0-100) used to weight the current sample when " \ 1555101099Srwatson "computing exponentially decaying average for resizing " \ 1556101099Srwatson "OldPLABSize") \ 1557105634Srwatson range(0, 100) \ 1558101099Srwatson \ 1559122524Srwatson product(bool, ResizeOldPLAB, true, \ 1560101099Srwatson "Dynamically resize (old gen) promotion LAB's") \ 1561101099Srwatson \ 1562105634Srwatson product(size_t, CMSOldPLABMax, 1024, \ 1563105634Srwatson "Maximum size of CMS gen promotion LAB caches per worker " \ 1564132232Srwatson "per block size") \ 1565105634Srwatson range(1, max_uintx) \ 1566105634Srwatson constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit) \ 1567105634Srwatson \ 1568105634Srwatson product(size_t, CMSOldPLABMin, 16, \ 1569101099Srwatson "Minimum size of CMS gen promotion LAB caches per worker " \ 1570105634Srwatson "per block size") \ 1571106160Srwatson range(1, max_uintx) \ 1572106160Srwatson constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit) \ 1573106160Srwatson \ 1574106160Srwatson product(uintx, CMSOldPLABNumRefills, 4, \ 1575106160Srwatson "Nominal number of refills of CMS gen promotion LAB cache " \ 1576106160Srwatson "per worker per block size") \ 1577105634Srwatson range(1, max_uintx) \ 1578101099Srwatson \ 1579101099Srwatson product(bool, 
CMSOldPLABResizeQuicker, false, \ 1580103759Srwatson "React on-the-fly during a scavenge to a sudden " \ 1581101099Srwatson "change in block demand rate") \ 1582101099Srwatson \ 1583101099Srwatson product(uintx, CMSOldPLABToleranceFactor, 4, \ 1584101099Srwatson "The tolerance of the phase-change detector for on-the-fly " \ 1585103761Srwatson "PLAB resizing during a scavenge") \ 1586101099Srwatson range(1, max_uintx) \ 1587101099Srwatson \ 1588101099Srwatson product(uintx, CMSOldPLABReactivityFactor, 2, \ 1589101099Srwatson "The gain in the feedback loop for on-the-fly PLAB resizing " \ 1590101099Srwatson "during a scavenge") \ 1591103759Srwatson range(1, max_uintx) \ 1592132232Srwatson \ 1593101099Srwatson product(bool, AlwaysPreTouch, false, \ 1594101099Srwatson "Force all freshly committed pages to be pre-touched") \ 1595101099Srwatson \ 1596122875Srwatson product(size_t, PreTouchParallelChunkSize, 1 * G, \ 1597122875Srwatson "Per-thread chunk size for parallel memory pre-touch.") \ 1598122875Srwatson range(1, SIZE_MAX / 2) \ 1599122875Srwatson \ 1600122875Srwatson product_pd(size_t, CMSYoungGenPerWorker, \ 1601122875Srwatson "The maximum size of young gen chosen by default per GC worker " \ 1602122875Srwatson "thread available") \ 1603122875Srwatson range(1, max_uintx) \ 1604122875Srwatson \ 1605122875Srwatson product(uintx, CMSIncrementalSafetyFactor, 10, \ 1606122875Srwatson "Percentage (0-100) used to add conservatism when computing the " \ 1607132232Srwatson "duty cycle") \ 1608122875Srwatson range(0, 100) \ 1609122875Srwatson \ 1610122875Srwatson product(uintx, CMSExpAvgFactor, 50, \ 1611140628Srwatson "Percentage (0-100) used to weight the current sample when " \ 1612140628Srwatson "computing exponential averages for CMS statistics") \ 1613140628Srwatson range(0, 100) \ 1614140628Srwatson \ 1615140628Srwatson product(uintx, CMS_FLSWeight, 75, \ 1616140628Srwatson "Percentage (0-100) used to weight the current sample when " \ 1617140628Srwatson "computing 
exponentially decaying averages for CMS FLS " \ 1618140628Srwatson "statistics") \ 1619140628Srwatson range(0, 100) \ 1620140628Srwatson \ 1621140628Srwatson product(uintx, CMS_FLSPadding, 1, \ 1622140628Srwatson "The multiple of deviation from mean to use for buffering " \ 1623140628Srwatson "against volatility in free list demand") \ 1624140628Srwatson range(0, max_juint) \ 1625140628Srwatson \ 1626140628Srwatson product(uintx, FLSCoalescePolicy, 2, \ 1627140628Srwatson "CMS: aggressiveness level for coalescing, increasing " \ 1628140628Srwatson "from 0 to 4") \ 1629140628Srwatson range(0, 4) \ 1630140628Srwatson \ 1631140628Srwatson product(bool, FLSAlwaysCoalesceLarge, false, \ 1632140628Srwatson "CMS: larger free blocks are always available for coalescing") \ 1633140628Srwatson \ 1634140628Srwatson product(double, FLSLargestBlockCoalesceProximity, 0.99, \ 1635140628Srwatson "CMS: the smaller the percentage the greater the coalescing " \ 1636140628Srwatson "force") \ 1637140628Srwatson range(0.0, 1.0) \ 1638140628Srwatson \ 1639140628Srwatson product(double, CMSSmallCoalSurplusPercent, 1.05, \ 1640140628Srwatson "CMS: the factor by which to inflate estimated demand of small " \ 1641140628Srwatson "block sizes to prevent coalescing with an adjoining block") \ 1642140628Srwatson range(0.0, DBL_MAX) \ 1643140628Srwatson \ 1644140628Srwatson product(double, CMSLargeCoalSurplusPercent, 0.95, \ 1645140628Srwatson "CMS: the factor by which to inflate estimated demand of large " \ 1646140628Srwatson "block sizes to prevent coalescing with an adjoining block") \ 1647140628Srwatson range(0.0, DBL_MAX) \ 1648140628Srwatson \ 1649140628Srwatson product(double, CMSSmallSplitSurplusPercent, 1.10, \ 1650140628Srwatson "CMS: the factor by which to inflate estimated demand of small " \ 1651140628Srwatson "block sizes to prevent splitting to supply demand for smaller " \ 1652140628Srwatson "blocks") \ 1653140628Srwatson range(0.0, DBL_MAX) \ 1654140628Srwatson \ 
1655140628Srwatson product(double, CMSLargeSplitSurplusPercent, 1.00, \ 1656140628Srwatson "CMS: the factor by which to inflate estimated demand of large " \ 1657140628Srwatson "block sizes to prevent splitting to supply demand for smaller " \ 1658140628Srwatson "blocks") \ 1659140628Srwatson range(0.0, DBL_MAX) \ 1660140628Srwatson \ 1661140628Srwatson product(bool, CMSExtrapolateSweep, false, \ 1662140628Srwatson "CMS: cushion for block demand during sweep") \ 1663140628Srwatson \ 1664140628Srwatson product(uintx, CMS_SweepWeight, 75, \ 1665140628Srwatson "Percentage (0-100) used to weight the current sample when " \ 1666140628Srwatson "computing exponentially decaying average for inter-sweep " \ 1667140628Srwatson "duration") \ 1668140628Srwatson range(0, 100) \ 1669140628Srwatson \ 1670140628Srwatson product(uintx, CMS_SweepPadding, 1, \ 1671140628Srwatson "The multiple of deviation from mean to use for buffering " \ 1672140628Srwatson "against volatility in inter-sweep duration") \ 1673140628Srwatson range(0, max_juint) \ 1674140628Srwatson \ 1675140628Srwatson product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 1676140628Srwatson "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 1677140628Srwatson "duration exceeds this threshold in milliseconds") \ 1678140628Srwatson range(0, max_uintx) \ 1679140628Srwatson \ 1680140628Srwatson product(bool, CMSClassUnloadingEnabled, true, \ 1681140628Srwatson "Whether class unloading enabled when using CMS GC") \ 1682140628Srwatson \ 1683140628Srwatson product(uintx, CMSClassUnloadingMaxInterval, 0, \ 1684140628Srwatson "When CMS class unloading is enabled, the maximum CMS cycle " \ 1685140628Srwatson "count for which classes may not be unloaded") \ 1686140628Srwatson range(0, max_uintx) \ 1687140628Srwatson \ 1688140628Srwatson product(uintx, CMSIndexedFreeListReplenish, 4, \ 1689140628Srwatson "Replenish an indexed free list with this number of chunks") \ 1690140628Srwatson range(1, max_uintx) \ 
1691140628Srwatson \ 1692140628Srwatson product(bool, CMSReplenishIntermediate, true, \ 1693140628Srwatson "Replenish all intermediate free-list caches") \ 1694140628Srwatson \ 1695140628Srwatson product(bool, CMSSplitIndexedFreeListBlocks, true, \ 1696140628Srwatson "When satisfying batched demand, split blocks from the " \ 1697140628Srwatson "IndexedFreeList whose size is a multiple of requested size") \ 1698140628Srwatson \ 1699140628Srwatson product(bool, CMSLoopWarn, false, \ 1700140628Srwatson "Warn in case of excessive CMS looping") \ 1701140628Srwatson \ 1702140628Srwatson /* where does the range max value of (max_jint - 1) come from? */ \ 1703140628Srwatson product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ 1704140628Srwatson "Maximum size of marking stack") \ 1705140628Srwatson range(1, (max_jint - 1)) \ 1706140628Srwatson \ 1707140628Srwatson product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ 1708140628Srwatson "Size of marking stack") \ 1709140628Srwatson constraint(MarkStackSizeConstraintFunc,AfterErgo) \ 1710140628Srwatson \ 1711140628Srwatson notproduct(bool, CMSMarkStackOverflowALot, false, \ 1712140628Srwatson "Simulate frequent marking stack / work queue overflow") \ 1713140628Srwatson \ 1714140628Srwatson notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ 1715140628Srwatson "An \"interval\" counter that determines how frequently " \ 1716140628Srwatson "to simulate overflow; a smaller number increases frequency") \ 1717140628Srwatson \ 1718140628Srwatson product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 1719140628Srwatson "Maximum number of abortable preclean iterations, if > 0") \ 1720140628Srwatson range(0, max_uintx) \ 1721140628Srwatson \ 1722140628Srwatson product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 1723140628Srwatson "Maximum time in abortable preclean (in milliseconds)") \ 1724140628Srwatson range(0, max_intx) \ 1725140628Srwatson \ 1726140628Srwatson product(uintx, 
CMSAbortablePrecleanMinWorkPerIteration, 100, \ 1727140628Srwatson "Nominal minimum work per abortable preclean iteration") \ 1728140628Srwatson range(0, max_uintx) \ 1729140628Srwatson \ 1730140628Srwatson manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ 1731140628Srwatson "Time that we sleep between iterations when not given " \ 1732140628Srwatson "enough work per iteration") \ 1733140628Srwatson range(0, max_intx) \ 1734140628Srwatson \ 1735140628Srwatson /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \ 1736140628Srwatson product(size_t, CMSRescanMultiple, 32, \ 1737140628Srwatson "Size (in cards) of CMS parallel rescan task") \ 1738140628Srwatson range(1, SIZE_MAX / 4096) \ 1739140628Srwatson constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit) \ 1740140628Srwatson \ 1741140628Srwatson /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \ 1742140628Srwatson product(size_t, CMSConcMarkMultiple, 32, \ 1743140628Srwatson "Size (in cards) of CMS concurrent MT marking task") \ 1744140628Srwatson range(1, SIZE_MAX / 4096) \ 1745140628Srwatson constraint(CMSConcMarkMultipleConstraintFunc,AfterMemoryInit) \ 1746140628Srwatson \ 1747140628Srwatson product(bool, CMSAbortSemantics, false, \ 1748140628Srwatson "Whether abort-on-overflow semantics is implemented") \ 1749140628Srwatson \ 1750140628Srwatson product(bool, CMSParallelInitialMarkEnabled, true, \ 1751140628Srwatson "Use the parallel initial mark.") \ 1752140628Srwatson \ 1753140628Srwatson product(bool, CMSParallelRemarkEnabled, true, \ 1754140628Srwatson "Whether parallel remark enabled (only if ParNewGC)") \ 1755140628Srwatson \ 1756140628Srwatson product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 1757140628Srwatson "Whether parallel remark of survivor space " \ 1758140628Srwatson "enabled (effective only if CMSParallelRemarkEnabled)") \ 1759140628Srwatson \ 1760140628Srwatson product(bool, CMSPLABRecordAlways, true, \ 1761140628Srwatson "Always record survivor 
space PLAB boundaries (effective only " \ 1762140628Srwatson "if CMSParallelSurvivorRemarkEnabled)") \ 1763140628Srwatson \ 1764140628Srwatson product(bool, CMSEdenChunksRecordAlways, true, \ 1765140628Srwatson "Always record eden chunks used for the parallel initial mark " \ 1766140628Srwatson "or remark of eden") \ 1767140628Srwatson \ 1768140628Srwatson product(bool, CMSConcurrentMTEnabled, true, \ 1769140628Srwatson "Whether multi-threaded concurrent work enabled " \ 1770140628Srwatson "(effective only if ParNewGC)") \ 1771140628Srwatson \ 1772140628Srwatson product(bool, CMSPrecleaningEnabled, true, \ 1773140628Srwatson "Whether concurrent precleaning enabled") \ 1774140628Srwatson \ 1775140628Srwatson product(uintx, CMSPrecleanIter, 3, \ 1776140628Srwatson "Maximum number of precleaning iteration passes") \ 1777140628Srwatson range(0, 9) \ 1778140628Srwatson \ 1779140628Srwatson product(uintx, CMSPrecleanDenominator, 3, \ 1780140628Srwatson "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 1781140628Srwatson "ratio") \ 1782140628Srwatson range(1, max_uintx) \ 1783140628Srwatson constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo) \ 1784140628Srwatson \ 1785140628Srwatson product(uintx, CMSPrecleanNumerator, 2, \ 1786140628Srwatson "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 1787140628Srwatson "ratio") \ 1788140628Srwatson range(0, max_uintx-1) \ 1789140628Srwatson constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo) \ 1790140628Srwatson \ 1791140628Srwatson product(bool, CMSPrecleanRefLists1, true, \ 1792140628Srwatson "Preclean ref lists during (initial) preclean phase") \ 1793140628Srwatson \ 1794140628Srwatson product(bool, CMSPrecleanRefLists2, false, \ 1795140628Srwatson "Preclean ref lists during abortable preclean phase") \ 1796140628Srwatson \ 1797140628Srwatson product(bool, CMSPrecleanSurvivors1, false, \ 1798140628Srwatson "Preclean survivors during (initial) preclean phase") \ 1799140628Srwatson 
\ 1800140628Srwatson product(bool, CMSPrecleanSurvivors2, true, \ 1801140628Srwatson "Preclean survivors during abortable preclean phase") \ 1802140628Srwatson \ 1803140628Srwatson product(uintx, CMSPrecleanThreshold, 1000, \ 1804140628Srwatson "Do not iterate again if number of dirty cards is less than this")\ 1805140628Srwatson range(100, max_uintx) \ 1806140628Srwatson \ 1807140628Srwatson product(bool, CMSCleanOnEnter, true, \ 1808140628Srwatson "Clean-on-enter optimization for reducing number of dirty cards") \ 1809140628Srwatson \ 1810140628Srwatson product(uintx, CMSRemarkVerifyVariant, 1, \ 1811140628Srwatson "Choose variant (1,2) of verification following remark") \ 1812140628Srwatson range(1, 2) \ 1813140628Srwatson \ 1814140628Srwatson product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 1815140628Srwatson "If Eden size is below this, do not try to schedule remark") \ 1816140628Srwatson range(0, max_uintx) \ 1817140628Srwatson \ 1818140628Srwatson product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 1819140628Srwatson "The Eden occupancy percentage (0-100) at which " \ 1820140628Srwatson "to try and schedule remark pause") \ 1821140628Srwatson range(0, 100) \ 1822140628Srwatson \ 1823140628Srwatson product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 1824140628Srwatson "Start sampling eden top at least before young gen " \ 1825140628Srwatson "occupancy reaches 1/<ratio> of the size at which " \ 1826140628Srwatson "we plan to schedule remark") \ 1827140628Srwatson range(1, max_uintx) \ 1828140628Srwatson \ 1829140628Srwatson product(uintx, CMSSamplingGrain, 16*K, \ 1830140628Srwatson "The minimum distance between eden samples for CMS (see above)") \ 1831140628Srwatson range(ObjectAlignmentInBytes, max_uintx) \ 1832140628Srwatson constraint(CMSSamplingGrainConstraintFunc,AfterMemoryInit) \ 1833140628Srwatson \ 1834140628Srwatson product(bool, CMSScavengeBeforeRemark, false, \ 1835140628Srwatson "Attempt scavenge before the CMS remark step") \ 
1836140628Srwatson \ 1837140628Srwatson product(uintx, CMSWorkQueueDrainThreshold, 10, \ 1838140628Srwatson "Don't drain below this size per parallel worker/thief") \ 1839140628Srwatson range(1, max_juint) \ 1840140628Srwatson constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo) \ 1841140628Srwatson \ 1842140628Srwatson manageable(intx, CMSWaitDuration, 2000, \ 1843140628Srwatson "Time in milliseconds that CMS thread waits for young GC") \ 1844140628Srwatson range(min_jint, max_jint) \ 1845140628Srwatson \ 1846140628Srwatson develop(uintx, CMSCheckInterval, 1000, \ 1847140628Srwatson "Interval in milliseconds that CMS thread checks if it " \ 1848140628Srwatson "should start a collection cycle") \ 1849140628Srwatson \ 1850140628Srwatson product(bool, CMSYield, true, \ 1851140628Srwatson "Yield between steps of CMS") \ 1852140628Srwatson \ 1853140628Srwatson product(size_t, CMSBitMapYieldQuantum, 10*M, \ 1854140628Srwatson "Bitmap operations should process at most this many bits " \ 1855140628Srwatson "between yields") \ 1856140628Srwatson range(1, max_uintx) \ 1857140628Srwatson constraint(CMSBitMapYieldQuantumConstraintFunc,AfterMemoryInit) \ 1858140628Srwatson \ 1859140628Srwatson product(bool, CMSPrintChunksInDump, false, \ 1860140628Srwatson "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ 1861140628Srwatson "trace level include more detailed information about the" \ 1862140628Srwatson "free chunks") \ 1863140628Srwatson \ 1864140628Srwatson product(bool, CMSPrintObjectsInDump, false, \ 1865140628Srwatson "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ 1866140628Srwatson "trace level include more detailed information about the" \ 1867140628Srwatson "allocated objects") \ 1868140628Srwatson \ 1869140628Srwatson diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 1870140628Srwatson "Verify that all references across the FLS boundary " \ 1871140628Srwatson "are to valid objects") \ 1872140628Srwatson \ 
1873140628Srwatson diagnostic(bool, FLSVerifyLists, false, \ 1874140628Srwatson "Do lots of (expensive) FreeListSpace verification") \ 1875140628Srwatson \ 1876140628Srwatson diagnostic(bool, FLSVerifyIndexTable, false, \ 1877140628Srwatson "Do lots of (expensive) FLS index table verification") \ 1878140628Srwatson \ 1879140628Srwatson develop(bool, FLSVerifyDictionary, false, \ 1880140628Srwatson "Do lots of (expensive) FLS dictionary verification") \ 1881140628Srwatson \ 1882140628Srwatson develop(bool, VerifyBlockOffsetArray, false, \ 1883140628Srwatson "Do (expensive) block offset array verification") \ 1884140628Srwatson \ 1885140628Srwatson diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ 1886110354Srwatson "Maintain _unallocated_block in BlockOffsetArray " \ 1887110354Srwatson "(currently applicable only to CMS collector)") \ 1888110354Srwatson \ 1889110354Srwatson product(intx, RefDiscoveryPolicy, 0, \ 1890110354Srwatson "Select type of reference discovery policy: " \ 1891110354Srwatson "reference-based(0) or referent-based(1)") \ 1892110354Srwatson range(ReferenceProcessor::DiscoveryPolicyMin, \ 1893110354Srwatson ReferenceProcessor::DiscoveryPolicyMax) \ 1894110354Srwatson \ 1895122524Srwatson product(bool, ParallelRefProcEnabled, false, \ 1896110354Srwatson "Enable parallel reference processing whenever possible") \ 1897110354Srwatson \ 1898110354Srwatson product(bool, ParallelRefProcBalancingEnabled, true, \ 1899110354Srwatson "Enable balancing of reference processing queues") \ 1900110354Srwatson \ 1901110354Srwatson product(uintx, CMSTriggerRatio, 80, \ 1902132232Srwatson "Percentage of MinHeapFreeRatio in CMS generation that is " \ 1903110354Srwatson "allocated before a CMS collection cycle commences") \ 1904110354Srwatson range(0, 100) \ 1905110354Srwatson \ 1906110354Srwatson product(uintx, CMSBootstrapOccupancy, 50, \ 1907110354Srwatson "Percentage CMS generation occupancy at which to " \ 1908110354Srwatson "initiate CMS collection 
for bootstrapping collection stats") \ 1909110354Srwatson range(0, 100) \ 1910110354Srwatson \ 1911110354Srwatson product(intx, CMSInitiatingOccupancyFraction, -1, \ 1912110354Srwatson "Percentage CMS generation occupancy to start a CMS collection " \ 1913110354Srwatson "cycle. A negative value means that CMSTriggerRatio is used") \ 1914110354Srwatson range(min_intx, 100) \ 1915110354Srwatson \ 1916110354Srwatson product(uintx, InitiatingHeapOccupancyPercent, 45, \ 1917122524Srwatson "The percent occupancy (IHOP) of the current old generation " \ 1918110354Srwatson "capacity above which a concurrent mark cycle will be initiated " \ 1919110354Srwatson "Its value may change over time if adaptive IHOP is enabled, " \ 1920110354Srwatson "otherwise the value remains constant. " \ 1921110354Srwatson "In the latter case a value of 0 will result as frequent as " \ 1922110354Srwatson "possible concurrent marking cycles. A value of 100 disables " \ 1923101099Srwatson "concurrent marking. " \ 1924101099Srwatson "Fragmentation waste in the old generation is not considered " \ 1925101099Srwatson "free space in this calculation. 
(G1 collector only)") \ 1926101099Srwatson range(0, 100) \ 1927101099Srwatson \ 1928101099Srwatson manageable(intx, CMSTriggerInterval, -1, \ 1929101099Srwatson "Commence a CMS collection cycle (at least) every so many " \ 1930101099Srwatson "milliseconds (0 permanently, -1 disabled)") \ 1931122524Srwatson range(-1, max_intx) \ 1932101099Srwatson \ 1933101099Srwatson product(bool, UseCMSInitiatingOccupancyOnly, false, \ 1934132232Srwatson "Only use occupancy as a criterion for starting a CMS collection")\ 1935101099Srwatson \ 1936101099Srwatson product(uintx, CMSIsTooFullPercentage, 98, \ 1937101099Srwatson "An absolute ceiling above which CMS will always consider the " \ 1938101099Srwatson "unloading of classes when class unloading is enabled") \ 1939101099Srwatson range(0, 100) \ 1940101099Srwatson \ 1941125293Srwatson develop(bool, CMSTestInFreeList, false, \ 1942101099Srwatson "Check if the coalesced range is already in the " \ 1943101099Srwatson "free lists as claimed") \ 1944103759Srwatson \ 1945101099Srwatson notproduct(bool, CMSVerifyReturnedBytes, false, \ 1946101099Srwatson "Check that all the garbage collected was returned to the " \ 1947101099Srwatson "free lists") \ 1948101099Srwatson \ 1949101099Srwatson notproduct(bool, ScavengeALot, false, \ 1950101099Srwatson "Force scavenge at every Nth exit from the runtime system " \ 1951101099Srwatson "(N=ScavengeALotInterval)") \ 1952101099Srwatson \ 1953101099Srwatson develop(bool, FullGCALot, false, \ 1954125293Srwatson "Force full gc at every Nth exit from the runtime system " \ 1955102115Srwatson "(N=FullGCALotInterval)") \ 1956101099Srwatson \ 1957101099Srwatson notproduct(bool, GCALotAtAllSafepoints, false, \ 1958101099Srwatson "Enforce ScavengeALot/GCALot at all potential safepoints") \ 1959101099Srwatson \ 1960101099Srwatson notproduct(bool, PromotionFailureALot, false, \ 1961101099Srwatson "Use promotion failure handling on every youngest generation " \ 1962122524Srwatson "collection") \ 
1963101099Srwatson \ 1964101099Srwatson develop(uintx, PromotionFailureALotCount, 1000, \ 1965132232Srwatson "Number of promotion failures occurring at PLAB " \ 1966102115Srwatson "refill attempts (ParNew) or promotion attempts " \ 1967101099Srwatson "(other young collectors)") \ 1968101099Srwatson \ 1969101099Srwatson develop(uintx, PromotionFailureALotInterval, 5, \ 1970101099Srwatson "Total collections between promotion failures a lot") \ 1971101099Srwatson \ 1972125293Srwatson experimental(uintx, WorkStealingSleepMillis, 1, \ 1973102115Srwatson "Sleep time when sleep is used for yields") \ 1974102115Srwatson \ 1975102115Srwatson experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ 1976102115Srwatson "Number of yields before a sleep is done during work stealing") \ 1977102115Srwatson \ 1978102115Srwatson experimental(uintx, WorkStealingHardSpins, 4096, \ 1979102115Srwatson "Number of iterations in a spin loop between checks on " \ 1980122524Srwatson "time out of hard spin") \ 1981102115Srwatson \ 1982102115Srwatson experimental(uintx, WorkStealingSpinToYieldRatio, 10, \ 1983132232Srwatson "Ratio of hard spins to calls to yield") \ 1984102115Srwatson \ 1985102115Srwatson develop(uintx, ObjArrayMarkingStride, 2048, \ 1986102115Srwatson "Number of object array elements to push onto the marking stack " \ 1987102115Srwatson "before pushing a continuation entry") \ 1988102115Srwatson \ 1989102115Srwatson develop(bool, MetadataAllocationFailALot, false, \ 1990125293Srwatson "Fail metadata allocations at intervals controlled by " \ 1991101099Srwatson "MetadataAllocationFailALotInterval") \ 1992101099Srwatson \ 1993101099Srwatson develop(uintx, MetadataAllocationFailALotInterval, 1000, \ 1994105634Srwatson "Metadata allocation failure a lot interval") \ 1995101099Srwatson \ 1996101099Srwatson notproduct(bool, ExecuteInternalVMTests, false, \ 1997122524Srwatson "Enable execution of internal VM tests") \ 1998101099Srwatson \ 1999101099Srwatson notproduct(bool, 
VerboseInternalVMTests, false, \ 2000101099Srwatson "Turn on logging for internal VM tests.") \ 2001105634Srwatson \ 2002132232Srwatson product(bool, ExecutingUnitTests, false, \ 2003101099Srwatson "Whether the JVM is running unit tests or not") \ 2004132232Srwatson \ 2005105634Srwatson product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 2006105634Srwatson \ 2007101099Srwatson product_pd(bool, ResizeTLAB, \ 2008101099Srwatson "Dynamically resize TLAB size for threads") \ 2009105634Srwatson \ 2010105634Srwatson product(bool, ZeroTLAB, false, \ 2011101099Srwatson "Zero out the newly created TLAB") \ 2012132232Srwatson \ 2013101099Srwatson product(bool, FastTLABRefill, true, \ 2014101099Srwatson "Use fast TLAB refill code") \ 2015101099Srwatson \ 2016105634Srwatson product(bool, TLABStats, true, \ 2017101099Srwatson "Provide more detailed and expensive TLAB statistics.") \ 2018132232Srwatson \ 2019105634Srwatson product_pd(bool, NeverActAsServerClassMachine, \ 2020105634Srwatson "Never act like a server-class machine") \ 2021105634Srwatson \ 2022105634Srwatson product(bool, AlwaysActAsServerClassMachine, false, \ 2023132232Srwatson "Always act like a server-class machine") \ 2024105634Srwatson \ 2025101099Srwatson product_pd(uint64_t, MaxRAM, \ 2026105634Srwatson "Real memory size (in bytes) used to set maximum heap size") \ 2027105634Srwatson range(0, 0XFFFFFFFFFFFFFFFF) \ 2028105634Srwatson \ 2029105634Srwatson product(size_t, ErgoHeapSizeLimit, 0, \ 2030105634Srwatson "Maximum ergonomically set heap size (in bytes); zero means use " \ 2031106090Srwatson "MaxRAM / MaxRAMFraction") \ 2032105634Srwatson range(0, max_uintx) \ 2033105634Srwatson \ 2034105634Srwatson experimental(bool, UseCGroupMemoryLimitForHeap, false, \ 2035105634Srwatson "Use CGroup memory limit as physical memory limit for heap " \ 2036105634Srwatson "sizing") \ 2037101099Srwatson \ 2038101099Srwatson product(uintx, MaxRAMFraction, 4, \ 2039101099Srwatson "Maximum fraction (1/n) of 
real memory used for maximum heap " \ 2040101099Srwatson "size") \ 2041125293Srwatson range(1, max_uintx) \ 2042102115Srwatson \ 2043102115Srwatson product(uintx, MinRAMFraction, 2, \ 2044102115Srwatson "Minimum fraction (1/n) of real memory used for maximum heap " \ 2045102115Srwatson "size on systems with small physical memory size") \ 2046102115Srwatson range(1, max_uintx) \ 2047102115Srwatson \ 2048102115Srwatson product(uintx, InitialRAMFraction, 64, \ 2049122524Srwatson "Fraction (1/n) of real memory used for initial heap size") \ 2050102115Srwatson range(1, max_uintx) \ 2051102115Srwatson \ 2052132232Srwatson develop(uintx, MaxVirtMemFraction, 2, \ 2053102115Srwatson "Maximum fraction (1/n) of virtual memory used for ergonomically "\ 2054102115Srwatson "determining maximum heap size") \ 2055102115Srwatson \ 2056102115Srwatson product(bool, UseAdaptiveSizePolicy, true, \ 2057102115Srwatson "Use adaptive generation sizing policies") \ 2058102115Srwatson \ 2059125293Srwatson product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \ 2060102115Srwatson "Use adaptive survivor sizing policies") \ 2061102115Srwatson \ 2062102115Srwatson product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \ 2063102115Srwatson "Use adaptive young-old sizing policies at minor collections") \ 2064102115Srwatson \ 2065102115Srwatson product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \ 2066102115Srwatson "Use adaptive young-old sizing policies at major collections") \ 2067122524Srwatson \ 2068102115Srwatson product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 2069102115Srwatson "Include statistics from System.gc() for adaptive size policy") \ 2070132232Srwatson \ 2071102115Srwatson product(bool, UseAdaptiveGCBoundary, false, \ 2072102115Srwatson "Allow young-old boundary to move") \ 2073102115Srwatson \ 2074102115Srwatson develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \ 2075102115Srwatson "Resize the virtual spaces of the young or old 
generations") \ 2076102115Srwatson range(-1, 1) \ 2077145855Srwatson \ 2078145855Srwatson product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 2079145855Srwatson "Policy for changing generation size for throughput goals") \ 2080145855Srwatson range(0, 1) \ 2081145855Srwatson \ 2082145855Srwatson product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 2083145855Srwatson "Number of steps where heuristics is used before data is used") \ 2084145855Srwatson range(0, max_uintx) \ 2085145855Srwatson \ 2086145855Srwatson develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \ 2087145855Srwatson "Number of collections before the adaptive sizing is started") \ 2088145855Srwatson \ 2089145855Srwatson product(uintx, AdaptiveSizePolicyOutputInterval, 0, \ 2090145855Srwatson "Collection interval for printing information; zero means never") \ 2091145855Srwatson range(0, max_uintx) \ 2092145855Srwatson \ 2093145855Srwatson product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \ 2094145855Srwatson "Use adaptive minimum footprint as a goal") \ 2095145855Srwatson \ 2096145855Srwatson product(uintx, AdaptiveSizePolicyWeight, 10, \ 2097145855Srwatson "Weight given to exponential resizing, between 0 and 100") \ 2098145855Srwatson range(0, 100) \ 2099145855Srwatson \ 2100145855Srwatson product(uintx, AdaptiveTimeWeight, 25, \ 2101145855Srwatson "Weight given to time in adaptive policy, between 0 and 100") \ 2102145855Srwatson range(0, 100) \ 2103145855Srwatson \ 2104145855Srwatson product(uintx, PausePadding, 1, \ 2105145855Srwatson "How much buffer to keep for pause time") \ 2106145855Srwatson range(0, max_juint) \ 2107145855Srwatson \ 2108145855Srwatson product(uintx, PromotedPadding, 3, \ 2109145855Srwatson "How much buffer to keep for promotion failure") \ 2110145855Srwatson range(0, max_juint) \ 2111145855Srwatson \ 2112145855Srwatson product(uintx, SurvivorPadding, 3, \ 2113101099Srwatson "How much buffer to keep for survivor overflow") \ 2114101099Srwatson range(0, max_juint) \ 
2115101099Srwatson \ 2116101099Srwatson product(uintx, ThresholdTolerance, 10, \ 2117101099Srwatson "Allowed collection cost difference between generations") \ 2118101099Srwatson range(0, 100) \ 2119101099Srwatson \ 2120122524Srwatson product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \ 2121122524Srwatson "If collection costs are within margin, reduce both by full " \ 2122101099Srwatson "delta") \ 2123101099Srwatson range(0, 100) \ 2124132232Srwatson \ 2125101099Srwatson product(uintx, YoungGenerationSizeIncrement, 20, \ 2126132232Srwatson "Adaptive size percentage change in young generation") \ 2127101099Srwatson range(0, 100) \ 2128101099Srwatson \ 2129101099Srwatson product(uintx, YoungGenerationSizeSupplement, 80, \ 2130101099Srwatson "Supplement to YoungedGenerationSizeIncrement used at startup") \ 2131101099Srwatson range(0, 100) \ 2132101099Srwatson \ 2133101099Srwatson product(uintx, YoungGenerationSizeSupplementDecay, 8, \ 2134101099Srwatson "Decay factor to YoungedGenerationSizeSupplement") \ 2135101099Srwatson range(1, max_uintx) \ 2136103759Srwatson \ 2137101099Srwatson product(uintx, TenuredGenerationSizeIncrement, 20, \ 2138101099Srwatson "Adaptive size percentage change in tenured generation") \ 2139101099Srwatson range(0, 100) \ 2140122524Srwatson \ 2141122524Srwatson product(uintx, TenuredGenerationSizeSupplement, 80, \ 2142103759Srwatson "Supplement to TenuredGenerationSizeIncrement used at startup") \ 2143101099Srwatson range(0, 100) \ 2144132232Srwatson \ 2145101099Srwatson product(uintx, TenuredGenerationSizeSupplementDecay, 2, \ 2146132232Srwatson "Decay factor to TenuredGenerationSizeIncrement") \ 2147101099Srwatson range(1, max_uintx) \ 2148101099Srwatson \ 2149101099Srwatson product(uintx, MaxGCPauseMillis, max_uintx - 1, \ 2150101099Srwatson "Adaptive size policy maximum GC pause time goal in millisecond, "\ 2151101099Srwatson "or (G1 Only) the maximum GC time per MMU time slice") \ 2152101099Srwatson range(1, max_uintx - 1) \ 
2153101099Srwatson constraint(MaxGCPauseMillisConstraintFunc,AfterErgo) \ 2154101099Srwatson \ 2155101099Srwatson product(uintx, GCPauseIntervalMillis, 0, \ 2156103759Srwatson "Time slice for MMU specification") \ 2157101099Srwatson constraint(GCPauseIntervalMillisConstraintFunc,AfterErgo) \ 2158101099Srwatson \ 2159101099Srwatson product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 2160122524Srwatson "Adaptive size policy maximum GC minor pause time goal " \ 2161122524Srwatson "in millisecond") \ 2162103759Srwatson range(0, max_uintx) \ 2163101099Srwatson \ 2164132232Srwatson product(uintx, GCTimeRatio, 99, \ 2165101099Srwatson "Adaptive size policy application time to GC time ratio") \ 2166132232Srwatson range(0, max_juint) \ 2167101099Srwatson \ 2168101099Srwatson product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \ 2169101099Srwatson "Adaptive size scale down factor for shrinking") \ 2170101099Srwatson range(1, max_uintx) \ 2171101099Srwatson \ 2172101099Srwatson product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \ 2173101934Srwatson "Adaptive size decays the major cost for long major intervals") \ 2174101099Srwatson \ 2175101099Srwatson product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \ 2176101099Srwatson "Time scale over which major costs decay") \ 2177101099Srwatson range(0, max_uintx) \ 2178101099Srwatson \ 2179101099Srwatson product(uintx, MinSurvivorRatio, 3, \ 2180101099Srwatson "Minimum ratio of young generation/survivor space size") \ 2181101099Srwatson range(3, max_uintx) \ 2182101099Srwatson \ 2183101099Srwatson product(uintx, InitialSurvivorRatio, 8, \ 2184132232Srwatson "Initial ratio of young generation/survivor space size") \ 2185101099Srwatson range(0, max_uintx) \ 2186101099Srwatson \ 2187101099Srwatson product(size_t, BaseFootPrintEstimate, 256*M, \ 2188106214Srwatson "Estimate of footprint other than Java Heap") \ 2189101099Srwatson range(0, max_uintx) \ 2190101099Srwatson \ 2191101099Srwatson product(bool, UseGCOverheadLimit, true, \ 
2192105634Srwatson "Use policy to limit of proportion of time spent in GC " \ 2193101099Srwatson "before an OutOfMemory error is thrown") \ 2194101099Srwatson \ 2195122524Srwatson product(uintx, GCTimeLimit, 98, \ 2196101099Srwatson "Limit of the proportion of time spent in GC before " \ 2197101099Srwatson "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ 2198101099Srwatson range(0, 100) \ 2199105634Srwatson \ 2200132232Srwatson product(uintx, GCHeapFreeLimit, 2, \ 2201101099Srwatson "Minimum percentage of free space after a full GC before an " \ 2202132232Srwatson "OutOfMemoryError is thrown (used with GCTimeLimit)") \ 2203105634Srwatson range(0, 100) \ 2204105634Srwatson \ 2205101099Srwatson develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \ 2206101099Srwatson "Number of consecutive collections before gc time limit fires") \ 2207132232Srwatson range(1, max_uintx) \ 2208101099Srwatson \ 2209101099Srwatson product(intx, PrefetchCopyIntervalInBytes, -1, \ 2210132232Srwatson "How far ahead to prefetch destination area (<= 0 means off)") \ 2211101099Srwatson range(-1, max_jint) \ 2212101099Srwatson \ 2213101099Srwatson product(intx, PrefetchScanIntervalInBytes, -1, \ 2214105634Srwatson "How far ahead to prefetch scan area (<= 0 means off)") \ 2215101099Srwatson range(-1, max_jint) \ 2216132232Srwatson \ 2217105634Srwatson product(intx, PrefetchFieldsAhead, -1, \ 2218132232Srwatson "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 2219105634Srwatson range(-1, max_jint) \ 2220105634Srwatson \ 2221132232Srwatson diagnostic(bool, VerifyDuringStartup, false, \ 2222105634Srwatson "Verify memory system before executing any Java code " \ 2223101099Srwatson "during VM initialization") \ 2224105634Srwatson \ 2225105634Srwatson diagnostic(bool, VerifyBeforeExit, trueInDebug, \ 2226105634Srwatson "Verify system before exiting") \ 2227105634Srwatson \ 2228105634Srwatson diagnostic(bool, VerifyBeforeGC, false, \ 2229106090Srwatson "Verify 
memory system before GC") \ 2230105634Srwatson \ 2231105634Srwatson diagnostic(bool, VerifyAfterGC, false, \ 2232105634Srwatson "Verify memory system after GC") \ 2233105634Srwatson \ 2234105634Srwatson diagnostic(bool, VerifyDuringGC, false, \ 2235101099Srwatson "Verify memory system during GC (between phases)") \ 2236101099Srwatson \ 2237101099Srwatson diagnostic(ccstrlist, VerifySubSet, "", \ 2238101099Srwatson "Memory sub-systems to verify when Verify*GC flag(s) " \ 2239101099Srwatson "are enabled. One or more sub-systems can be specified " \ 2240101099Srwatson "in a comma separated string. Sub-systems are: " \ 2241101099Srwatson "threads, heap, symbol_table, string_table, codecache, " \ 2242101099Srwatson "dictionary, classloader_data_graph, metaspace, jni_handles, " \ 2243101099Srwatson "codecache_oops") \ 2244105722Srwatson \ 2245105722Srwatson diagnostic(bool, GCParallelVerificationEnabled, true, \ 2246105722Srwatson "Enable parallel memory system verification") \ 2247122524Srwatson \ 2248101099Srwatson diagnostic(bool, DeferInitialCardMark, false, \ 2249101099Srwatson "When +ReduceInitialCardMarks, explicitly defer any that " \ 2250132232Srwatson "may arise from new_pre_store_barrier") \ 2251101099Srwatson \ 2252101099Srwatson product(bool, UseCondCardMark, false, \ 2253101099Srwatson "Check for already marked card before updating card table") \ 2254101099Srwatson \ 2255101099Srwatson diagnostic(bool, VerifyRememberedSets, false, \ 2256101099Srwatson "Verify GC remembered sets") \ 2257112574Srwatson \ 2258112574Srwatson diagnostic(bool, VerifyObjectStartArray, true, \ 2259112574Srwatson "Verify GC object start array if verify before/after") \ 2260112574Srwatson \ 2261112574Srwatson product(bool, DisableExplicitGC, false, \ 2262112574Srwatson "Ignore calls to System.gc()") \ 2263112574Srwatson \ 2264112574Srwatson notproduct(bool, CheckMemoryInitialization, false, \ 2265122524Srwatson "Check memory initialization") \ 2266112574Srwatson \ 2267112574Srwatson 
diagnostic(bool, BindCMSThreadToCPU, false, \ 2268112574Srwatson "Bind CMS Thread to CPU if possible") \ 2269112574Srwatson \ 2270112574Srwatson diagnostic(uintx, CPUForCMSThread, 0, \ 2271112574Srwatson "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \ 2272112574Srwatson range(0, max_juint) \ 2273112574Srwatson \ 2274112574Srwatson product(bool, BindGCTaskThreadsToCPUs, false, \ 2275106418Srwatson "Bind GCTaskThreads to CPUs if possible") \ 2276106418Srwatson \ 2277106418Srwatson product(bool, UseGCTaskAffinity, false, \ 2278106418Srwatson "Use worker affinity when asking for GCTasks") \ 2279106418Srwatson \ 2280106418Srwatson product(uintx, ProcessDistributionStride, 4, \ 2281106418Srwatson "Stride through processors when distributing processes") \ 2282106418Srwatson range(0, max_juint) \ 2283106418Srwatson \ 2284122524Srwatson product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 2285106418Srwatson "Number of times the coordinator GC thread will sleep while " \ 2286106418Srwatson "yielding before giving up and resuming GC") \ 2287106418Srwatson range(0, max_juint) \ 2288106418Srwatson \ 2289106418Srwatson product(uintx, CMSYieldSleepCount, 0, \ 2290106418Srwatson "Number of times a GC thread (minus the coordinator) " \ 2291106418Srwatson "will sleep while yielding before giving up and resuming GC") \ 2292106418Srwatson range(0, max_juint) \ 2293106418Srwatson \ 2294132232Srwatson product(bool, PrintGC, false, \ 2295106418Srwatson "Print message at garbage collection. " \ 2296106418Srwatson "Deprecated, use -Xlog:gc instead.") \ 2297106418Srwatson \ 2298106418Srwatson product(bool, PrintGCDetails, false, \ 2299106418Srwatson "Print more details at garbage collection. 
" \ 2300106418Srwatson "Deprecated, use -Xlog:gc* instead.") \ 2301106418Srwatson \ 2302106418Srwatson develop(intx, ConcGCYieldTimeout, 0, \ 2303106418Srwatson "If non-zero, assert that GC threads yield within this " \ 2304106418Srwatson "number of milliseconds") \ 2305106418Srwatson range(0, max_intx) \ 2306106418Srwatson \ 2307106418Srwatson develop(bool, TraceFinalizerRegistration, false, \ 2308106418Srwatson "Trace registration of final references") \ 2309122524Srwatson \ 2310106418Srwatson notproduct(bool, TraceScavenge, false, \ 2311106418Srwatson "Trace scavenge") \ 2312106418Srwatson \ 2313106418Srwatson product(bool, IgnoreEmptyClassPaths, false, \ 2314106418Srwatson "Ignore empty path elements in -classpath") \ 2315106418Srwatson \ 2316106418Srwatson product(size_t, InitialBootClassLoaderMetaspaceSize, \ 2317106418Srwatson NOT_LP64(2200*K) LP64_ONLY(4*M), \ 2318106418Srwatson "Initial size of the boot class loader data metaspace") \ 2319106161Srwatson range(30*K, max_uintx/BytesPerWord) \ 2320106161Srwatson constraint(InitialBootClassLoaderMetaspaceSizeConstraintFunc, AfterErgo)\ 2321106161Srwatson \ 2322106161Srwatson product(bool, TraceYoungGenTime, false, \ 2323106416Srwatson "Trace accumulated time for young collection") \ 2324106161Srwatson \ 2325106161Srwatson product(bool, TraceOldGenTime, false, \ 2326106161Srwatson "Trace accumulated time for old collection") \ 2327106161Srwatson \ 2328122524Srwatson product(bool, PrintHeapAtSIGBREAK, true, \ 2329106161Srwatson "Print heap layout in response to SIGBREAK") \ 2330106161Srwatson \ 2331106416Srwatson manageable(bool, PrintClassHistogram, false, \ 2332106416Srwatson "Print a histogram of class instances") \ 2333106416Srwatson \ 2334106161Srwatson develop(bool, TraceGCTaskManager, false, \ 2335132232Srwatson "Trace actions of the GC task manager") \ 2336106161Srwatson \ 2337106161Srwatson develop(bool, TraceGCTaskQueue, false, \ 2338106161Srwatson "Trace actions of the GC task queues") \ 
2339106161Srwatson \ 2340106161Srwatson develop(bool, TraceParallelOldGCMarkingPhase, false, \ 2341106161Srwatson "Trace marking phase in ParallelOldGC") \ 2342112574Srwatson \ 2343112574Srwatson develop(bool, TraceParallelOldGCDensePrefix, false, \ 2344112574Srwatson "Trace dense prefix computation for ParallelOldGC") \ 2345112574Srwatson \ 2346112574Srwatson develop(bool, IgnoreLibthreadGPFault, false, \ 2347112574Srwatson "Suppress workaround for libthread GP fault") \ 2348112574Srwatson \ 2349112574Srwatson experimental(double, ObjectCountCutOffPercent, 0.5, \ 2350112574Srwatson "The percentage of the used heap that the instances of a class " \ 2351122524Srwatson "must occupy for the class to generate a trace event") \ 2352112574Srwatson range(0.0, 100.0) \ 2353112574Srwatson \ 2354112574Srwatson /* JVMTI heap profiling */ \ 2355112574Srwatson \ 2356112574Srwatson diagnostic(bool, TraceJVMTIObjectTagging, false, \ 2357112574Srwatson "Trace JVMTI object tagging calls") \ 2358112574Srwatson \ 2359112574Srwatson diagnostic(bool, VerifyBeforeIteration, false, \ 2360112574Srwatson "Verify memory system before JVMTI iteration") \ 2361112574Srwatson \ 2362126121Spjd /* compiler interface */ \ 2363126121Spjd \ 2364106161Srwatson develop(bool, CIPrintCompilerName, false, \ 2365106161Srwatson "when CIPrint is active, print the name of the active compiler") \ 2366106161Srwatson \ 2367106161Srwatson diagnostic(bool, CIPrintCompileQueue, false, \ 2368106161Srwatson "display the contents of the compile queue whenever a " \ 2369106161Srwatson "compilation is enqueued") \ 2370106161Srwatson \ 2371122524Srwatson develop(bool, CIPrintRequests, false, \ 2372106161Srwatson "display every request for compilation") \ 2373106161Srwatson \ 2374126121Spjd product(bool, CITime, false, \ 2375126121Spjd "collect timing information for compilation") \ 2376106161Srwatson \ 2377126121Spjd develop(bool, CITimeVerbose, false, \ 2378106161Srwatson "be more verbose in compilation timings") \ 
2379106161Srwatson \ 2380106161Srwatson develop(bool, CITimeEach, false, \ 2381106161Srwatson "display timing information after each successful compilation") \ 2382106161Srwatson \ 2383106161Srwatson develop(bool, CICountOSR, false, \ 2384106161Srwatson "use a separate counter when assigning ids to osr compilations") \ 2385106161Srwatson \ 2386106161Srwatson develop(bool, CICompileNatives, true, \ 2387106161Srwatson "compile native methods if supported by the compiler") \ 2388106161Srwatson \ 2389106161Srwatson develop_pd(bool, CICompileOSR, \ 2390101099Srwatson "compile on stack replacement methods if supported by the " \ 2391101099Srwatson "compiler") \ 2392101099Srwatson \ 2393101099Srwatson develop(bool, CIPrintMethodCodes, false, \ 2394101099Srwatson "print method bytecodes of the compiled code") \ 2395101099Srwatson \ 2396101099Srwatson develop(bool, CIPrintTypeFlow, false, \ 2397101099Srwatson "print the results of ciTypeFlow analysis") \ 2398122524Srwatson \ 2399101099Srwatson develop(bool, CITraceTypeFlow, false, \ 2400101099Srwatson "detailed per-bytecode tracing of ciTypeFlow analysis") \ 2401132232Srwatson \ 2402101099Srwatson develop(intx, OSROnlyBCI, -1, \ 2403101099Srwatson "OSR only at this bci. 
Negative values mean exclude that bci") \ 2404101099Srwatson \ 2405101099Srwatson /* compiler */ \ 2406101099Srwatson \ 2407101099Srwatson /* notice: the max range value here is max_jint, not max_intx */ \ 2408101099Srwatson /* because of overflow issue */ \ 2409101099Srwatson product(intx, CICompilerCount, CI_COMPILER_COUNT, \ 2410101099Srwatson "Number of compiler threads to run") \ 2411101099Srwatson range(0, max_jint) \ 2412101099Srwatson constraint(CICompilerCountConstraintFunc, AfterErgo) \ 2413101099Srwatson \ 2414101099Srwatson product(intx, CompilationPolicyChoice, 0, \ 2415101099Srwatson "which compilation policy (0-3)") \ 2416122524Srwatson range(0, 3) \ 2417101099Srwatson \ 2418101099Srwatson develop(bool, UseStackBanging, true, \ 2419132232Srwatson "use stack banging for stack overflow checks (required for " \ 2420101099Srwatson "proper StackOverflow handling; disable only to measure cost " \ 2421101099Srwatson "of stackbanging)") \ 2422101099Srwatson \ 2423101099Srwatson develop(bool, UseStrictFP, true, \ 2424101099Srwatson "use strict fp if modifier strictfp is set") \ 2425101099Srwatson \ 2426101099Srwatson develop(bool, GenerateSynchronizationCode, true, \ 2427101099Srwatson "generate locking/unlocking code for synchronized methods and " \ 2428101099Srwatson "monitors") \ 2429101099Srwatson \ 2430101099Srwatson develop(bool, GenerateRangeChecks, true, \ 2431101099Srwatson "Generate range checks for array accesses") \ 2432101099Srwatson \ 2433101099Srwatson diagnostic_pd(bool, ImplicitNullChecks, \ 2434122524Srwatson "Generate code for implicit null checks") \ 2435101099Srwatson \ 2436101099Srwatson product_pd(bool, TrapBasedNullChecks, \ 2437132232Srwatson "Generate code for null checks that uses a cmp and trap " \ 2438101099Srwatson "instruction raising SIGTRAP. 
This is only used if an access to" \ 2439101099Srwatson "null (+offset) will not raise a SIGSEGV, i.e.," \ 2440101099Srwatson "ImplicitNullChecks don't work (PPC64).") \ 2441101099Srwatson \ 2442101099Srwatson product(bool, PrintSafepointStatistics, false, \ 2443101099Srwatson "Print statistics about safepoint synchronization") \ 2444101099Srwatson \ 2445101099Srwatson product(intx, PrintSafepointStatisticsCount, 300, \ 2446101099Srwatson "Total number of safepoint statistics collected " \ 2447101099Srwatson "before printing them out") \ 2448101099Srwatson range(1, max_intx) \ 2449101099Srwatson \ 2450101099Srwatson product(intx, PrintSafepointStatisticsTimeout, -1, \ 2451101099Srwatson "Print safepoint statistics only when safepoint takes " \ 2452101099Srwatson "more than PrintSafepointSatisticsTimeout in millis") \ 2453122524Srwatson LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ 2454101099Srwatson NOT_LP64(range(-1, max_intx)) \ 2455101099Srwatson \ 2456132232Srwatson product(bool, Inline, true, \ 2457101099Srwatson "Enable inlining") \ 2458101099Srwatson \ 2459101099Srwatson product(bool, ClipInlining, true, \ 2460101099Srwatson "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ 2461132232Srwatson \ 2462101099Srwatson develop(bool, UseCHA, true, \ 2463101099Srwatson "Enable CHA") \ 2464101099Srwatson \ 2465101099Srwatson product(bool, UseTypeProfile, true, \ 2466101099Srwatson "Check interpreter profile for historically monomorphic calls") \ 2467101099Srwatson \ 2468101099Srwatson diagnostic(bool, PrintInlining, false, \ 2469101099Srwatson "Print inlining optimizations") \ 2470101099Srwatson \ 2471101099Srwatson product(bool, UsePopCountInstruction, false, \ 2472101099Srwatson "Use population count instruction") \ 2473101099Srwatson \ 2474101099Srwatson develop(bool, EagerInitialization, false, \ 2475101099Srwatson "Eagerly initialize classes if possible") \ 2476122524Srwatson \ 2477101099Srwatson diagnostic(bool, LogTouchedMethods, false, \ 
2478101099Srwatson "Log methods which have been ever touched in runtime") \ 2479132232Srwatson \ 2480101099Srwatson diagnostic(bool, PrintTouchedMethodsAtExit, false, \ 2481101099Srwatson "Print all methods that have been ever touched in runtime") \ 2482101099Srwatson \ 2483101099Srwatson develop(bool, TraceMethodReplacement, false, \ 2484101099Srwatson "Print when methods are replaced do to recompilation") \ 2485101099Srwatson \ 2486119202Srwatson develop(bool, PrintMethodFlushing, false, \ 2487119202Srwatson "Print the nmethods being flushed") \ 2488119202Srwatson \ 2489119202Srwatson diagnostic(bool, PrintMethodFlushingStatistics, false, \ 2490119202Srwatson "print statistics about method flushing") \ 2491119202Srwatson \ 2492119202Srwatson diagnostic(intx, HotMethodDetectionLimit, 100000, \ 2493119202Srwatson "Number of compiled code invocations after which " \ 2494122524Srwatson "the method is considered as hot by the flusher") \ 2495119202Srwatson range(1, max_jint) \ 2496119202Srwatson \ 2497132232Srwatson diagnostic(intx, MinPassesBeforeFlush, 10, \ 2498119202Srwatson "Minimum number of sweeper passes before an nmethod " \ 2499119202Srwatson "can be flushed") \ 2500119202Srwatson range(0, max_intx) \ 2501119202Srwatson \ 2502119202Srwatson product(bool, UseCodeAging, true, \ 2503119202Srwatson "Insert counter to detect warm methods") \ 2504101099Srwatson \ 2505106648Srwatson diagnostic(bool, StressCodeAging, false, \ 2506106648Srwatson "Start with counters compiled in") \ 2507101099Srwatson \ 2508106648Srwatson develop(bool, StressCodeBuffers, false, \ 2509106648Srwatson "Exercise code buffer expansion and other rare state changes") \ 2510101099Srwatson \ 2511106648Srwatson diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 2512106648Srwatson "Generate extra debugging information for non-safepoints in " \ 2513106648Srwatson "nmethods") \ 2514106648Srwatson \ 2515106648Srwatson product(bool, PrintVMOptions, false, \ 2516106648Srwatson "Print flags that 
appeared on the command line") \ 2517106648Srwatson \ 2518106648Srwatson product(bool, IgnoreUnrecognizedVMOptions, false, \ 2519106648Srwatson "Ignore unrecognized VM options") \ 2520106648Srwatson \ 2521106648Srwatson product(bool, PrintCommandLineFlags, false, \ 2522106648Srwatson "Print flags specified on command line or set by ergonomics") \ 2523101099Srwatson \ 2524101099Srwatson product(bool, PrintFlagsInitial, false, \ 2525101099Srwatson "Print all VM flags before argument processing and exit VM") \ 2526122524Srwatson \ 2527101099Srwatson product(bool, PrintFlagsFinal, false, \ 2528101099Srwatson "Print all VM flags after argument and ergonomic processing") \ 2529132232Srwatson \ 2530101099Srwatson notproduct(bool, PrintFlagsWithComments, false, \ 2531101099Srwatson "Print all VM flags with default values and descriptions and " \ 2532101099Srwatson "exit") \ 2533101099Srwatson \ 2534101099Srwatson product(bool, PrintFlagsRanges, false, \ 2535101099Srwatson "Print VM flags and their ranges and exit VM") \ 2536101099Srwatson \ 2537101099Srwatson diagnostic(bool, SerializeVMOutput, true, \ 2538101099Srwatson "Use a mutex to serialize output to tty and LogFile") \ 2539101099Srwatson \ 2540101099Srwatson diagnostic(bool, DisplayVMOutput, true, \ 2541101099Srwatson "Display all VM output on the tty, independently of LogVMOutput") \ 2542101099Srwatson \ 2543101099Srwatson diagnostic(bool, LogVMOutput, false, \ 2544122524Srwatson "Save VM output to LogFile") \ 2545101099Srwatson \ 2546101099Srwatson diagnostic(ccstr, LogFile, NULL, \ 2547132232Srwatson "If LogVMOutput or LogCompilation is on, save VM output to " \ 2548101099Srwatson "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ 2549101099Srwatson \ 2550101099Srwatson product(ccstr, ErrorFile, NULL, \ 2551101099Srwatson "If an error occurs, save the error data to this file " \ 2552101099Srwatson "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 2553101099Srwatson \ 2554101099Srwatson 
product(bool, DisplayVMOutputToStderr, false, \ 2555101099Srwatson "If DisplayVMOutput is true, display all VM output to stderr") \ 2556101099Srwatson \ 2557101099Srwatson product(bool, DisplayVMOutputToStdout, false, \ 2558101099Srwatson "If DisplayVMOutput is true, display all VM output to stdout") \ 2559101099Srwatson \ 2560101099Srwatson product(bool, UseHeavyMonitors, false, \ 2561101099Srwatson "use heavyweight instead of lightweight Java monitors") \ 2562122524Srwatson \ 2563101099Srwatson product(bool, PrintStringTableStatistics, false, \ 2564101099Srwatson "print statistics about the StringTable and SymbolTable") \ 2565132232Srwatson \ 2566101099Srwatson diagnostic(bool, VerifyStringTableAtExit, false, \ 2567101099Srwatson "verify StringTable contents at exit") \ 2568101099Srwatson \ 2569101099Srwatson notproduct(bool, PrintSymbolTableSizeHistogram, false, \ 2570101099Srwatson "print histogram of the symbol table") \ 2571101099Srwatson \ 2572104530Srwatson notproduct(bool, ExitVMOnVerifyError, false, \ 2573104530Srwatson "standard exit from VM if bytecode verify error " \ 2574104530Srwatson "(only in debug mode)") \ 2575104530Srwatson \ 2576104530Srwatson diagnostic(ccstr, AbortVMOnException, NULL, \ 2577104530Srwatson "Call fatal if this exception is thrown. 
Example: " \ 2578104530Srwatson "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ 2579104530Srwatson \ 2580104530Srwatson diagnostic(ccstr, AbortVMOnExceptionMessage, NULL, \ 2581122524Srwatson "Call fatal if the exception pointed by AbortVMOnException " \ 2582104530Srwatson "has this message") \ 2583104530Srwatson \ 2584132232Srwatson develop(bool, DebugVtables, false, \ 2585104530Srwatson "add debugging code to vtable dispatch") \ 2586104530Srwatson \ 2587104530Srwatson notproduct(bool, PrintVtableStats, false, \ 2588104530Srwatson "print vtables stats at end of run") \ 2589132232Srwatson \ 2590104530Srwatson develop(bool, TraceCreateZombies, false, \ 2591104530Srwatson "trace creation of zombie nmethods") \ 2592104530Srwatson \ 2593104530Srwatson notproduct(bool, IgnoreLockingAssertions, false, \ 2594104530Srwatson "disable locking assertions (for speed)") \ 2595104530Srwatson \ 2596119202Srwatson product(bool, RangeCheckElimination, true, \ 2597119202Srwatson "Eliminate range checks") \ 2598119202Srwatson \ 2599119202Srwatson develop_pd(bool, UncommonNullCast, \ 2600119202Srwatson "track occurrences of null in casts; adjust compiler tactics") \ 2601119202Srwatson \ 2602119202Srwatson develop(bool, TypeProfileCasts, true, \ 2603119202Srwatson "treat casts like calls for purposes of type profiling") \ 2604122524Srwatson \ 2605119202Srwatson develop(bool, DelayCompilationDuringStartup, true, \ 2606119202Srwatson "Delay invoking the compiler until main application class is " \ 2607132232Srwatson "loaded") \ 2608119202Srwatson \ 2609119202Srwatson develop(bool, CompileTheWorld, false, \ 2610119202Srwatson "Compile all methods in all classes in bootstrap class path " \ 2611119202Srwatson "(stress test)") \ 2612119202Srwatson \ 2613119202Srwatson develop(bool, CompileTheWorldPreloadClasses, true, \ 2614103759Srwatson "Preload all classes used by a class before start loading") \ 2615101099Srwatson \ 2616101099Srwatson notproduct(intx, 
CompileTheWorldSafepointInterval, 100, \ 2617101099Srwatson "Force a safepoint every n compiles so sweeper can keep up") \ 2618103759Srwatson \ 2619101099Srwatson develop(bool, FillDelaySlots, true, \ 2620101099Srwatson "Fill delay slots (on SPARC only)") \ 2621103759Srwatson \ 2622122524Srwatson develop(bool, TimeLivenessAnalysis, false, \ 2623101099Srwatson "Time computation of bytecode liveness analysis") \ 2624103759Srwatson \ 2625132232Srwatson develop(bool, TraceLivenessGen, false, \ 2626101099Srwatson "Trace the generation of liveness analysis information") \ 2627101099Srwatson \ 2628103759Srwatson notproduct(bool, TraceLivenessQuery, false, \ 2629101099Srwatson "Trace queries of liveness analysis information") \ 2630101099Srwatson \ 2631101099Srwatson notproduct(bool, CollectIndexSetStatistics, false, \ 2632104546Srwatson "Collect information about IndexSets") \ 2633145076Scsjp \ 2634104546Srwatson develop(bool, UseLoopSafepoints, true, \ 2635104546Srwatson "Generate Safepoint nodes in every loop") \ 2636104546Srwatson \ 2637104546Srwatson develop(intx, FastAllocateSizeLimit, 128*K, \ 2638104546Srwatson /* Note: This value is zero mod 1<<13 for a cheap sparc set. 
*/ \ 2639104546Srwatson "Inline allocations larger than this in doublewords must go slow")\ 2640104546Srwatson \ 2641105637Srwatson product(bool, AggressiveOpts, false, \ 2642104546Srwatson "Enable aggressive optimizations - see arguments.cpp") \ 2643104546Srwatson \ 2644122524Srwatson product_pd(bool, CompactStrings, \ 2645104546Srwatson "Enable Strings to use single byte chars in backing store") \ 2646104546Srwatson \ 2647104546Srwatson product_pd(uintx, TypeProfileLevel, \ 2648132232Srwatson "=XYZ, with Z: Type profiling of arguments at call; " \ 2649104546Srwatson "Y: Type profiling of return value at call; " \ 2650104546Srwatson "X: Type profiling of parameters to methods; " \ 2651145076Scsjp "X, Y and Z in 0=off ; 1=jsr292 only; 2=all methods") \ 2652132232Srwatson constraint(TypeProfileLevelConstraintFunc, AfterErgo) \ 2653104546Srwatson \ 2654104546Srwatson product(intx, TypeProfileArgsLimit, 2, \ 2655104546Srwatson "max number of call arguments to consider for type profiling") \ 2656104569Srwatson range(0, 16) \ 2657104546Srwatson \ 2658104546Srwatson product(intx, TypeProfileParmsLimit, 2, \ 2659104546Srwatson "max number of incoming parameters to consider for type profiling"\ 2660101099Srwatson ", -1 for all") \ 2661106212Srwatson range(-1, 64) \ 2662101099Srwatson \ 2663101099Srwatson /* statistics */ \ 2664101099Srwatson develop(bool, CountCompiledCalls, false, \ 2665101099Srwatson "Count method invocations") \ 2666101099Srwatson \ 2667101099Srwatson notproduct(bool, CountRuntimeCalls, false, \ 2668122524Srwatson "Count VM runtime calls") \ 2669101099Srwatson \ 2670101099Srwatson develop(bool, CountJNICalls, false, \ 2671101099Srwatson "Count jni method invocations") \ 2672101099Srwatson \ 2673132232Srwatson notproduct(bool, CountJVMCalls, false, \ 2674101099Srwatson "Count jvm method invocations") \ 2675101099Srwatson \ 2676101099Srwatson notproduct(bool, CountRemovableExceptions, false, \ 2677132232Srwatson "Count exceptions that could be replaced by 
branches due to " \ 2678101099Srwatson "inlining") \ 2679101099Srwatson \ 2680101099Srwatson notproduct(bool, ICMissHistogram, false, \ 2681101099Srwatson "Produce histogram of IC misses") \ 2682101099Srwatson \ 2683101099Srwatson /* interpreter */ \ 2684101099Srwatson product_pd(bool, RewriteBytecodes, \ 2685102129Srwatson "Allow rewriting of bytecodes (bytecodes are not immutable)") \ 2686102129Srwatson \ 2687102112Srwatson product_pd(bool, RewriteFrequentPairs, \ 2688102112Srwatson "Rewrite frequently used bytecode pairs into a single bytecode") \ 2689102112Srwatson \ 2690105637Srwatson diagnostic(bool, PrintInterpreter, false, \ 2691102112Srwatson "Print the generated interpreter code") \ 2692102112Srwatson \ 2693122524Srwatson product(bool, UseInterpreter, true, \ 2694102112Srwatson "Use interpreter for non-compiled methods") \ 2695102112Srwatson \ 2696132232Srwatson develop(bool, UseFastSignatureHandlers, true, \ 2697102112Srwatson "Use fast signature handlers for native calls") \ 2698102112Srwatson \ 2699102112Srwatson product(bool, UseLoopCounter, true, \ 2700102112Srwatson "Increment invocation counter on backward branch") \ 2701102112Srwatson \ 2702102112Srwatson product_pd(bool, UseOnStackReplacement, \ 2703102129Srwatson "Use on stack replacement, calls runtime if invoc. 
counter " \ 2704102129Srwatson "overflows in loop") \ 2705102112Srwatson \ 2706102112Srwatson notproduct(bool, TraceOnStackReplacement, false, \ 2707102112Srwatson "Trace on stack replacement") \ 2708105637Srwatson \ 2709102112Srwatson product_pd(bool, PreferInterpreterNativeStubs, \ 2710102112Srwatson "Use always interpreter stubs for native methods invoked via " \ 2711122524Srwatson "interpreter") \ 2712102112Srwatson \ 2713102112Srwatson develop(bool, CountBytecodes, false, \ 2714132232Srwatson "Count number of bytecodes executed") \ 2715102112Srwatson \ 2716102112Srwatson develop(bool, PrintBytecodeHistogram, false, \ 2717102112Srwatson "Print histogram of the executed bytecodes") \ 2718102112Srwatson \ 2719102112Srwatson develop(bool, PrintBytecodePairHistogram, false, \ 2720102112Srwatson "Print histogram of the executed bytecode pairs") \ 2721101099Srwatson \ 2722101099Srwatson diagnostic(bool, PrintSignatureHandlers, false, \ 2723101099Srwatson "Print code generated for native method signature handlers") \ 2724101099Srwatson \ 2725101099Srwatson develop(bool, VerifyOops, false, \ 2726101099Srwatson "Do plausibility checks for oops") \ 2727101099Srwatson \ 2728101099Srwatson develop(bool, CheckUnhandledOops, false, \ 2729122524Srwatson "Check for unhandled oops in VM code") \ 2730101099Srwatson \ 2731101099Srwatson develop(bool, VerifyJNIFields, trueInDebug, \ 2732132232Srwatson "Verify jfieldIDs for instance fields") \ 2733101099Srwatson \ 2734101099Srwatson notproduct(bool, VerifyJNIEnvThread, false, \ 2735101099Srwatson "Verify JNIEnv.thread == Thread::current() when entering VM " \ 2736101099Srwatson "from JNI") \ 2737101099Srwatson \ 2738101099Srwatson develop(bool, VerifyFPU, false, \ 2739101099Srwatson "Verify FPU state (check for NaN's, etc.)") \ 2740101099Srwatson \ 2741101099Srwatson develop(bool, VerifyThread, false, \ 2742101099Srwatson "Watch the thread register for corruption (SPARC only)") \ 2743101099Srwatson \ 2744101099Srwatson 
develop(bool, VerifyActivationFrameSize, false, \ 2745101099Srwatson "Verify that activation frame didn't become smaller than its " \ 2746101099Srwatson "minimal size") \ 2747122524Srwatson \ 2748101099Srwatson develop(bool, TraceFrequencyInlining, false, \ 2749101099Srwatson "Trace frequency based inlining") \ 2750132232Srwatson \ 2751101099Srwatson develop_pd(bool, InlineIntrinsics, \ 2752101099Srwatson "Inline intrinsics that can be statically resolved") \ 2753101099Srwatson \ 2754101099Srwatson product_pd(bool, ProfileInterpreter, \ 2755101099Srwatson "Profile at the bytecode level during interpretation") \ 2756101099Srwatson \ 2757101099Srwatson develop(bool, TraceProfileInterpreter, false, \ 2758101099Srwatson "Trace profiling at the bytecode level during interpretation. " \ 2759101099Srwatson "This outputs the profiling information collected to improve " \ 2760101099Srwatson "jit compilation.") \ 2761105634Srwatson \ 2762101099Srwatson develop_pd(bool, ProfileTraps, \ 2763101099Srwatson "Profile deoptimization traps at the bytecode level") \ 2764101099Srwatson \ 2765122524Srwatson product(intx, ProfileMaturityPercentage, 20, \ 2766101099Srwatson "number of method invocations/branches (expressed as % of " \ 2767101099Srwatson "CompileThreshold) before using the method's profile") \ 2768105634Srwatson range(0, 100) \ 2769132232Srwatson \ 2770101099Srwatson diagnostic(bool, PrintMethodData, false, \ 2771132232Srwatson "Print the results of +ProfileInterpreter at end of run") \ 2772105634Srwatson \ 2773105634Srwatson develop(bool, VerifyDataPointer, trueInDebug, \ 2774101099Srwatson "Verify the method data pointer during interpreter profiling") \ 2775101099Srwatson \ 2776105634Srwatson develop(bool, VerifyCompiledCode, false, \ 2777105634Srwatson "Include miscellaneous runtime verifications in nmethod code; " \ 2778101099Srwatson "default off because it disturbs nmethod size heuristics") \ 2779132232Srwatson \ 2780101099Srwatson notproduct(bool, 
CrashGCForDumpingJavaThread, false, \ 2781101099Srwatson "Manually make GC thread crash then dump java stack trace; " \ 2782101099Srwatson "Test only") \ 2783105634Srwatson \ 2784101099Srwatson /* compilation */ \ 2785132232Srwatson product(bool, UseCompiler, true, \ 2786105634Srwatson "Use Just-In-Time compilation") \ 2787105634Srwatson \ 2788105634Srwatson develop(bool, TraceCompilationPolicy, false, \ 2789105634Srwatson "Trace compilation policy") \ 2790132232Srwatson \ 2791105634Srwatson develop(bool, TimeCompilationPolicy, false, \ 2792101099Srwatson "Time the compilation policy") \ 2793105634Srwatson \ 2794105634Srwatson product(bool, UseCounterDecay, true, \ 2795105634Srwatson "Adjust recompilation counters") \ 2796105634Srwatson \ 2797105634Srwatson develop(intx, CounterHalfLifeTime, 30, \ 2798106090Srwatson "Half-life time of invocation counters (in seconds)") \ 2799105634Srwatson \ 2800105634Srwatson develop(intx, CounterDecayMinIntervalLength, 500, \ 2801105634Srwatson "The minimum interval (in milliseconds) between invocation of " \ 2802105634Srwatson "CounterDecay") \ 2803105634Srwatson \ 2804105634Srwatson product(bool, AlwaysCompileLoopMethods, false, \ 2805101099Srwatson "When using recompilation, never interpret methods " \ 2806101099Srwatson "containing loops") \ 2807101099Srwatson \ 2808101099Srwatson product(bool, DontCompileHugeMethods, true, \ 2809101099Srwatson "Do not compile methods > HugeMethodLimit") \ 2810101099Srwatson \ 2811101099Srwatson /* Bytecode escape analysis estimation. 
*/ \ 2812101099Srwatson product(bool, EstimateArgEscape, true, \ 2813101099Srwatson "Analyze bytecodes to estimate escape state of arguments") \ 2814101099Srwatson \ 2815101099Srwatson product(intx, BCEATraceLevel, 0, \ 2816101099Srwatson "How much tracing to do of bytecode escape analysis estimates " \ 2817122524Srwatson "(0-3)") \ 2818101099Srwatson range(0, 3) \ 2819101099Srwatson \ 2820132232Srwatson product(intx, MaxBCEAEstimateLevel, 5, \ 2821101099Srwatson "Maximum number of nested calls that are analyzed by BC EA") \ 2822101099Srwatson range(0, max_jint) \ 2823101099Srwatson \ 2824101099Srwatson product(intx, MaxBCEAEstimateSize, 150, \ 2825132232Srwatson "Maximum bytecode size of a method to be analyzed by BC EA") \ 2826101099Srwatson range(0, max_jint) \ 2827101099Srwatson \ 2828101099Srwatson product(intx, AllocatePrefetchStyle, 1, \ 2829101099Srwatson "0 = no prefetch, " \ 2830101099Srwatson "1 = generate prefetch instructions for each allocation, " \ 2831101099Srwatson "2 = use TLAB watermark to gate allocation prefetch, " \ 2832101099Srwatson "3 = generate one prefetch instruction per cache line") \ 2833101099Srwatson range(0, 3) \ 2834101099Srwatson \ 2835101099Srwatson product(intx, AllocatePrefetchDistance, -1, \ 2836101099Srwatson "Distance to prefetch ahead of allocation pointer. 
" \ 2837101099Srwatson "-1: use system-specific value (automatically determined") \ 2838101099Srwatson constraint(AllocatePrefetchDistanceConstraintFunc, AfterMemoryInit)\ 2839101099Srwatson \ 2840101099Srwatson product(intx, AllocatePrefetchLines, 3, \ 2841122524Srwatson "Number of lines to prefetch ahead of array allocation pointer") \ 2842101099Srwatson range(1, 64) \ 2843101099Srwatson \ 2844132232Srwatson product(intx, AllocateInstancePrefetchLines, 1, \ 2845101099Srwatson "Number of lines to prefetch ahead of instance allocation " \ 2846101099Srwatson "pointer") \ 2847101099Srwatson range(1, 64) \ 2848101099Srwatson \ 2849101099Srwatson product(intx, AllocatePrefetchStepSize, 16, \ 2850132232Srwatson "Step size in bytes of sequential prefetch instructions") \ 2851101099Srwatson range(1, 512) \ 2852101099Srwatson constraint(AllocatePrefetchStepSizeConstraintFunc,AfterMemoryInit)\ 2853101099Srwatson \ 2854101099Srwatson product(intx, AllocatePrefetchInstr, 0, \ 2855101099Srwatson "Select instruction to prefetch ahead of allocation pointer") \ 2856101099Srwatson constraint(AllocatePrefetchInstrConstraintFunc, AfterMemoryInit) \ 2857101099Srwatson \ 2858101099Srwatson /* deoptimization */ \ 2859101099Srwatson develop(bool, TraceDeoptimization, false, \ 2860101099Srwatson "Trace deoptimization") \ 2861101099Srwatson \ 2862101099Srwatson develop(bool, PrintDeoptimizationDetails, false, \ 2863101099Srwatson "Print more information about deoptimization") \ 2864101099Srwatson \ 2865101099Srwatson develop(bool, DebugDeoptimization, false, \ 2866122524Srwatson "Tracing various information while debugging deoptimization") \ 2867101099Srwatson \ 2868101099Srwatson product(intx, SelfDestructTimer, 0, \ 2869132232Srwatson "Will cause VM to terminate after a given time (in minutes) " \ 2870101099Srwatson "(0 means off)") \ 2871101099Srwatson range(0, max_intx) \ 2872101099Srwatson \ 2873101099Srwatson product(intx, MaxJavaStackTraceDepth, 1024, \ 2874101099Srwatson "The 
maximum number of lines in the stack trace for Java " \ 2875101099Srwatson "exceptions (0 means all)") \ 2876101099Srwatson range(0, max_jint/2) \ 2877101099Srwatson \ 2878101099Srwatson /* notice: the max range value here is max_jint, not max_intx */ \ 2879101099Srwatson /* because of overflow issue */ \ 2880101099Srwatson diagnostic(intx, GuaranteedSafepointInterval, 1000, \ 2881101099Srwatson "Guarantee a safepoint (at least) every so many milliseconds " \ 2882101099Srwatson "(0 means none)") \ 2883101099Srwatson range(0, max_jint) \ 2884122524Srwatson \ 2885101099Srwatson product(intx, SafepointTimeoutDelay, 10000, \ 2886101099Srwatson "Delay in milliseconds for option SafepointTimeout") \ 2887132232Srwatson LP64_ONLY(range(0, max_intx/MICROUNITS)) \ 2888101099Srwatson NOT_LP64(range(0, max_intx)) \ 2889101099Srwatson \ 2890101099Srwatson product(intx, NmethodSweepActivity, 10, \ 2891101099Srwatson "Removes cold nmethods from code cache if > 0. Higher values " \ 2892101099Srwatson "result in more aggressive sweeping") \ 2893101099Srwatson range(0, 2000) \ 2894101099Srwatson \ 2895101099Srwatson notproduct(bool, LogSweeper, false, \ 2896101099Srwatson "Keep a ring buffer of sweeper activity") \ 2897101099Srwatson \ 2898101099Srwatson notproduct(intx, SweeperLogEntries, 1024, \ 2899101099Srwatson "Number of records in the ring buffer of sweeper activity") \ 2900101099Srwatson \ 2901101099Srwatson notproduct(intx, MemProfilingInterval, 500, \ 2902101099Srwatson "Time between each invocation of the MemProfiler") \ 2903122524Srwatson \ 2904101099Srwatson develop(intx, MallocCatchPtr, -1, \ 2905101099Srwatson "Hit breakpoint when mallocing/freeing this pointer") \ 2906132232Srwatson \ 2907101099Srwatson notproduct(ccstrlist, SuppressErrorAt, "", \ 2908101099Srwatson "List of assertions (file:line) to muzzle") \ 2909101099Srwatson \ 2910101099Srwatson develop(intx, StackPrintLimit, 100, \ 2911101099Srwatson "number of stack frames to print in VM-level stack dump") \ 
2912101099Srwatson \ 2913101099Srwatson notproduct(intx, MaxElementPrintSize, 256, \ 2914101099Srwatson "maximum number of elements to print") \ 2915101099Srwatson \ 2916101099Srwatson notproduct(intx, MaxSubklassPrintSize, 4, \ 2917101099Srwatson "maximum number of subklasses to print when printing klass") \ 2918101099Srwatson \ 2919101099Srwatson product(intx, MaxInlineLevel, 9, \ 2920101099Srwatson "maximum number of nested calls that are inlined") \ 2921101099Srwatson range(0, max_jint) \ 2922101099Srwatson \ 2923122524Srwatson product(intx, MaxRecursiveInlineLevel, 1, \ 2924101099Srwatson "maximum number of nested recursive calls that are inlined") \ 2925101099Srwatson range(0, max_jint) \ 2926132232Srwatson \ 2927101099Srwatson develop(intx, MaxForceInlineLevel, 100, \ 2928101099Srwatson "maximum number of nested calls that are forced for inlining " \ 2929101099Srwatson "(using CompileCommand or marked w/ @ForceInline)") \ 2930101099Srwatson range(0, max_jint) \ 2931101099Srwatson \ 2932101099Srwatson product_pd(intx, InlineSmallCode, \ 2933101099Srwatson "Only inline already compiled methods if their code size is " \ 2934101099Srwatson "less than this") \ 2935101099Srwatson range(0, max_jint) \ 2936101099Srwatson \ 2937101099Srwatson product(intx, MaxInlineSize, 35, \ 2938101099Srwatson "The maximum bytecode size of a method to be inlined") \ 2939101099Srwatson range(0, max_jint) \ 2940101099Srwatson \ 2941122524Srwatson product_pd(intx, FreqInlineSize, \ 2942101099Srwatson "The maximum bytecode size of a frequent method to be inlined") \ 2943101099Srwatson range(0, max_jint) \ 2944132232Srwatson \ 2945101099Srwatson product(intx, MaxTrivialSize, 6, \ 2946101099Srwatson "The maximum bytecode size of a trivial method to be inlined") \ 2947101099Srwatson range(0, max_jint) \ 2948101099Srwatson \ 2949101099Srwatson product(intx, MinInliningThreshold, 250, \ 2950101099Srwatson "The minimum invocation count a method needs to have to be " \ 2951101099Srwatson 
"inlined") \ 2952101099Srwatson range(0, max_jint) \ 2953101099Srwatson \ 2954101099Srwatson develop(intx, MethodHistogramCutoff, 100, \ 2955101099Srwatson "The cutoff value for method invocation histogram (+CountCalls)") \ 2956101099Srwatson \ 2957101099Srwatson diagnostic(intx, ProfilerNumberOfInterpretedMethods, 25, \ 2958101099Srwatson "Number of interpreted methods to show in profile") \ 2959122524Srwatson \ 2960101099Srwatson diagnostic(intx, ProfilerNumberOfCompiledMethods, 25, \ 2961101099Srwatson "Number of compiled methods to show in profile") \ 2962132232Srwatson \ 2963101099Srwatson diagnostic(intx, ProfilerNumberOfStubMethods, 25, \ 2964101099Srwatson "Number of stub methods to show in profile") \ 2965101099Srwatson \ 2966101099Srwatson diagnostic(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 2967101099Srwatson "Number of runtime stub nodes to show in profile") \ 2968101099Srwatson \ 2969101099Srwatson product(intx, ProfileIntervalsTicks, 100, \ 2970101099Srwatson "Number of ticks between printing of interval profile " \ 2971101099Srwatson "(+ProfileIntervals)") \ 2972101099Srwatson range(0, max_intx) \ 2973101099Srwatson \ 2974101099Srwatson notproduct(intx, ScavengeALotInterval, 1, \ 2975101099Srwatson "Interval between which scavenge will occur with +ScavengeALot") \ 2976101099Srwatson \ 2977122524Srwatson notproduct(intx, FullGCALotInterval, 1, \ 2978101099Srwatson "Interval between which full gc will occur with +FullGCALot") \ 2979101099Srwatson \ 2980132232Srwatson notproduct(intx, FullGCALotStart, 0, \ 2981101099Srwatson "For which invocation to start FullGCAlot") \ 2982101099Srwatson \ 2983101099Srwatson notproduct(intx, FullGCALotDummies, 32*K, \ 2984101099Srwatson "Dummy object allocated with +FullGCALot, forcing all objects " \ 2985101099Srwatson "to move") \ 2986101099Srwatson \ 2987102129Srwatson develop(intx, DontYieldALotInterval, 10, \ 2988102129Srwatson "Interval between which yields will be dropped (milliseconds)") \ 
2989101099Srwatson \ 2990101099Srwatson develop(intx, ProfilerPCTickThreshold, 15, \ 2991101099Srwatson "Number of ticks in a PC buckets to be a hotspot") \ 2992101099Srwatson \ 2993101099Srwatson notproduct(intx, DeoptimizeALotInterval, 5, \ 2994101099Srwatson "Number of exits until DeoptimizeALot kicks in") \ 2995122524Srwatson \ 2996101099Srwatson notproduct(intx, ZombieALotInterval, 5, \ 2997101099Srwatson "Number of exits until ZombieALot kicks in") \ 2998132232Srwatson \ 2999101099Srwatson diagnostic(uintx, MallocMaxTestWords, 0, \ 3000101099Srwatson "If non-zero, maximum number of words that malloc/realloc can " \ 3001101099Srwatson "allocate (for testing only)") \ 3002101099Srwatson range(0, max_uintx) \ 3003101099Srwatson \ 3004102112Srwatson product(intx, TypeProfileWidth, 2, \ 3005102129Srwatson "Number of receiver types to record in call/cast profile") \ 3006102129Srwatson range(0, 8) \ 3007102112Srwatson \ 3008102112Srwatson develop(intx, BciProfileWidth, 2, \ 3009102112Srwatson "Number of return bci's to record in ret profile") \ 3010105637Srwatson \ 3011102112Srwatson product(intx, PerMethodRecompilationCutoff, 400, \ 3012102112Srwatson "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 3013122524Srwatson range(-1, max_intx) \ 3014102112Srwatson \ 3015102112Srwatson product(intx, PerBytecodeRecompilationCutoff, 200, \ 3016132232Srwatson "Per-BCI limit on repeated recompilation (-1=>'Inf')") \ 3017102112Srwatson range(-1, max_intx) \ 3018102112Srwatson \ 3019102112Srwatson product(intx, PerMethodTrapLimit, 100, \ 3020102112Srwatson "Limit on traps (of one kind) in a method (includes inlines)") \ 3021102112Srwatson range(0, max_jint) \ 3022161026Srwatson \ 3023160243Scsjp experimental(intx, PerMethodSpecTrapLimit, 5000, \ 3024160243Scsjp "Limit on speculative traps (of one kind) in a method " \ 3025160243Scsjp "(includes inlines)") \ 3026160243Scsjp range(0, max_jint) \ 3027160243Scsjp \ 3028160243Scsjp product(intx, 
PerBytecodeTrapLimit, 4, \ 3029160243Scsjp "Limit on traps (of one kind) at a particular BCI") \ 3030160243Scsjp range(0, max_jint) \ 3031160243Scsjp \ 3032160243Scsjp experimental(intx, SpecTrapLimitExtraEntries, 3, \ 3033106217Srwatson "Extra method data trap entries for speculation") \ 3034101099Srwatson \ 3035106217Srwatson develop(intx, InlineFrequencyRatio, 20, \ 3036106217Srwatson "Ratio of call site execution to caller method invocation") \ 3037106217Srwatson range(0, max_jint) \ 3038106217Srwatson \ 3039106217Srwatson diagnostic_pd(intx, InlineFrequencyCount, \ 3040122875Srwatson "Count of call site execution necessary to trigger frequent " \ 3041140628Srwatson "inlining") \ 3042140628Srwatson range(0, max_jint) \ 3043147091Srwatson \ 3044140628Srwatson develop(intx, InlineThrowCount, 50, \ 3045112675Srwatson "Force inlining of interpreted methods that throw this often") \ 3046106217Srwatson range(0, max_jint) \ 3047106217Srwatson \ 3048106217Srwatson develop(intx, InlineThrowMaxSize, 200, \ 3049106217Srwatson "Force inlining of throwing methods smaller than this") \ 3050145855Srwatson range(0, max_jint) \ 3051106217Srwatson \ 3052106217Srwatson develop(intx, ProfilerNodeSize, 1024, \ 3053106217Srwatson "Size in K to allocate for the Profile Nodes of each thread") \ 3054106217Srwatson range(0, 1024) \ 3055106217Srwatson \ 3056106217Srwatson /* gc parameters */ \ 3057106217Srwatson product(size_t, InitialHeapSize, 0, \ 3058122875Srwatson "Initial heap size (in bytes); zero means use ergonomics") \ 3059140628Srwatson constraint(InitialHeapSizeConstraintFunc,AfterErgo) \ 3060140628Srwatson \ 3061147091Srwatson product(size_t, MaxHeapSize, ScaleForWordSize(96*M), \ 3062140628Srwatson "Maximum heap size (in bytes)") \ 3063106217Srwatson constraint(MaxHeapSizeConstraintFunc,AfterErgo) \ 3064106217Srwatson \ 3065106217Srwatson product(size_t, OldSize, ScaleForWordSize(4*M), \ 3066106217Srwatson "Initial tenured generation size (in bytes)") \ 3067106217Srwatson 
range(0, max_uintx) \ 3068145855Srwatson \ 3069106217Srwatson product(size_t, NewSize, ScaleForWordSize(1*M), \ 3070106217Srwatson "Initial new generation size (in bytes)") \ 3071106217Srwatson constraint(NewSizeConstraintFunc,AfterErgo) \ 3072123173Srwatson \ 3073131025Srwatson product(size_t, MaxNewSize, max_uintx, \ 3074115707Srwatson "Maximum new generation size (in bytes), max_uintx means set " \ 3075106217Srwatson "ergonomically") \ 3076122820Srwatson range(0, max_uintx) \ 3077106217Srwatson \ 3078106217Srwatson product_pd(size_t, HeapBaseMinAddress, \ 3079106217Srwatson "OS specific low limit for heap base address") \ 3080106217Srwatson constraint(HeapBaseMinAddressConstraintFunc,AfterErgo) \ 3081106217Srwatson \ 3082106217Srwatson product(size_t, PretenureSizeThreshold, 0, \ 3083106217Srwatson "Maximum size in bytes of objects allocated in DefNew " \ 3084106217Srwatson "generation; zero means no maximum") \ 3085106217Srwatson range(0, max_uintx) \ 3086106217Srwatson \ 3087106217Srwatson product(size_t, MinTLABSize, 2*K, \ 3088106217Srwatson "Minimum allowed TLAB size (in bytes)") \ 3089106217Srwatson range(1, max_uintx/2) \ 3090106217Srwatson constraint(MinTLABSizeConstraintFunc,AfterMemoryInit) \ 3091106217Srwatson \ 3092106217Srwatson product(size_t, TLABSize, 0, \ 3093106217Srwatson "Starting TLAB size (in bytes); zero means set ergonomically") \ 3094106217Srwatson constraint(TLABSizeConstraintFunc,AfterMemoryInit) \ 3095106217Srwatson \ 3096106217Srwatson product(size_t, YoungPLABSize, 4096, \ 3097106217Srwatson "Size of young gen promotion LAB's (in HeapWords)") \ 3098106217Srwatson constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \ 3099106217Srwatson \ 3100106217Srwatson product(size_t, OldPLABSize, 1024, \ 3101106217Srwatson "Size of old gen promotion LAB's (in HeapWords), or Number " \ 3102145855Srwatson "of blocks to attempt to claim when refilling CMS LAB's") \ 3103106217Srwatson constraint(OldPLABSizeConstraintFunc,AfterMemoryInit) \ 
3104106217Srwatson \ 3105106217Srwatson product(uintx, TLABAllocationWeight, 35, \ 3106106217Srwatson "Allocation averaging weight") \ 3107106217Srwatson range(0, 100) \ 3108106217Srwatson \ 3109106217Srwatson /* Limit the lower bound of this flag to 1 as it is used */ \ 3110106217Srwatson /* in a division expression. */ \ 3111106217Srwatson product(uintx, TLABWasteTargetPercent, 1, \ 3112106217Srwatson "Percentage of Eden that can be wasted") \ 3113122875Srwatson range(1, 100) \ 3114140628Srwatson \ 3115140628Srwatson product(uintx, TLABRefillWasteFraction, 64, \ 3116147091Srwatson "Maximum TLAB waste at a refill (internal fragmentation)") \ 3117140628Srwatson range(1, max_juint) \ 3118106217Srwatson \ 3119123607Srwatson product(uintx, TLABWasteIncrement, 4, \ 3120106217Srwatson "Increment allowed waste at slow allocation") \ 3121106217Srwatson range(0, max_jint) \ 3122106217Srwatson constraint(TLABWasteIncrementConstraintFunc,AfterMemoryInit) \ 3123106217Srwatson \ 3124106217Srwatson product(uintx, SurvivorRatio, 8, \ 3125106217Srwatson "Ratio of eden/survivor space size") \ 3126106217Srwatson range(1, max_uintx-2) \ 3127106217Srwatson constraint(SurvivorRatioConstraintFunc,AfterMemoryInit) \ 3128122875Srwatson \ 3129106217Srwatson product(uintx, NewRatio, 2, \ 3130106217Srwatson "Ratio of old/new generation sizes") \ 3131106217Srwatson range(0, max_uintx-1) \ 3132140628Srwatson \ 3133140628Srwatson product_pd(size_t, NewSizeThreadIncrease, \ 3134147091Srwatson "Additional size added to desired new generation size per " \ 3135140628Srwatson "non-daemon thread (in bytes)") \ 3136106217Srwatson range(0, max_uintx) \ 3137106217Srwatson \ 3138106217Srwatson product_pd(size_t, MetaspaceSize, \ 3139106217Srwatson "Initial threshold (in bytes) at which a garbage collection " \ 3140106217Srwatson "is done to reduce Metaspace usage") \ 3141122875Srwatson constraint(MetaspaceSizeConstraintFunc,AfterErgo) \ 3142140628Srwatson \ 3143140628Srwatson product(size_t, 
MaxMetaspaceSize, max_uintx, \ 3144140628Srwatson "Maximum size of Metaspaces (in bytes)") \ 3145140628Srwatson constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo) \ 3146140628Srwatson \ 3147140628Srwatson product(size_t, CompressedClassSpaceSize, 1*G, \ 3148140628Srwatson "Maximum size of class area in Metaspace when compressed " \ 3149140628Srwatson "class pointers are used") \ 3150140628Srwatson range(1*M, 3*G) \ 3151140628Srwatson \ 3152140628Srwatson manageable(uintx, MinHeapFreeRatio, 40, \ 3153140628Srwatson "The minimum percentage of heap free after GC to avoid expansion."\ 3154110354Srwatson " For most GCs this applies to the old generation. In G1 and" \ 3155110354Srwatson " ParallelGC it applies to the whole heap.") \ 3156106217Srwatson range(0, 100) \ 3157106217Srwatson constraint(MinHeapFreeRatioConstraintFunc,AfterErgo) \ 3158106217Srwatson \ 3159106217Srwatson manageable(uintx, MaxHeapFreeRatio, 70, \ 3160106217Srwatson "The maximum percentage of heap free after GC to avoid shrinking."\ 3161106217Srwatson " For most GCs this applies to the old generation. 
In G1 and" \ 3162106217Srwatson " ParallelGC it applies to the whole heap.") \ 3163145855Srwatson range(0, 100) \ 3164145855Srwatson constraint(MaxHeapFreeRatioConstraintFunc,AfterErgo) \ 3165145855Srwatson \ 3166145855Srwatson product(bool, ShrinkHeapInSteps, true, \ 3167145855Srwatson "When disabled, informs the GC to shrink the java heap directly" \ 3168145855Srwatson " to the target size at the next full GC rather than requiring" \ 3169106217Srwatson " smaller steps during multiple full GCs.") \ 3170106217Srwatson \ 3171106217Srwatson product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 3172106217Srwatson "Number of milliseconds per MB of free space in the heap") \ 3173106217Srwatson range(0, max_intx) \ 3174106217Srwatson constraint(SoftRefLRUPolicyMSPerMBConstraintFunc,AfterMemoryInit) \ 3175112574Srwatson \ 3176106418Srwatson product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 3177106418Srwatson "The minimum change in heap space due to GC (in bytes)") \ 3178106217Srwatson range(0, max_uintx) \ 3179112574Srwatson \ 3180106217Srwatson product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ 3181106217Srwatson "The minimum expansion of Metaspace (in bytes)") \ 3182106217Srwatson range(0, max_uintx) \ 3183106217Srwatson \ 3184106217Srwatson product(uintx, MaxMetaspaceFreeRatio, 70, \ 3185106217Srwatson "The maximum percentage of Metaspace free after GC to avoid " \ 3186106217Srwatson "shrinking") \ 3187119202Srwatson range(0, 100) \ 3188106217Srwatson constraint(MaxMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3189106217Srwatson \ 3190106217Srwatson product(uintx, MinMetaspaceFreeRatio, 40, \ 3191106217Srwatson "The minimum percentage of Metaspace free after GC to avoid " \ 3192119202Srwatson "expansion") \ 3193106217Srwatson range(0, 99) \ 3194106217Srwatson constraint(MinMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3195106217Srwatson \ 3196106217Srwatson product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ 3197106217Srwatson "The 
maximum expansion of Metaspace without full GC (in bytes)") \ 3198106217Srwatson range(0, max_uintx) \ 3199106217Srwatson \ 3200106217Srwatson product(uintx, QueuedAllocationWarningCount, 0, \ 3201106217Srwatson "Number of times an allocation that queues behind a GC " \ 3202106217Srwatson "will retry before printing a warning") \ 3203106217Srwatson range(0, max_uintx) \ 3204106217Srwatson \ 3205106217Srwatson diagnostic(uintx, VerifyGCStartAt, 0, \ 3206106217Srwatson "GC invoke count where +VerifyBefore/AfterGC kicks in") \ 3207106217Srwatson range(0, max_uintx) \ 3208106217Srwatson \ 3209106217Srwatson diagnostic(intx, VerifyGCLevel, 0, \ 3210106217Srwatson "Generation level at which to start +VerifyBefore/AfterGC") \ 3211106217Srwatson range(0, 1) \ 3212160243Scsjp \ 3213101099Srwatson product(uintx, MaxTenuringThreshold, 15, \ 3214101099Srwatson "Maximum value for tenuring threshold") \ 3215112717Srwatson range(0, markOopDesc::max_age + 1) \ 3216113531Srwatson constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \ 3217 \ 3218 product(uintx, InitialTenuringThreshold, 7, \ 3219 "Initial value for tenuring threshold") \ 3220 range(0, markOopDesc::max_age + 1) \ 3221 constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \ 3222 \ 3223 product(uintx, TargetSurvivorRatio, 50, \ 3224 "Desired percentage of survivor space used after scavenge") \ 3225 range(0, 100) \ 3226 \ 3227 product(uintx, MarkSweepDeadRatio, 5, \ 3228 "Percentage (0-100) of the old gen allowed as dead wood. " \ 3229 "Serial mark sweep treats this as both the minimum and maximum " \ 3230 "value. " \ 3231 "CMS uses this value only if it falls back to mark sweep. " \ 3232 "Par compact uses a variable scale based on the density of the " \ 3233 "generation and treats this as the maximum value when the heap " \ 3234 "is either completely full or completely empty. 
Par compact " \ 3235 "also has a smaller default value; see arguments.cpp.") \ 3236 range(0, 100) \ 3237 \ 3238 product(uint, MarkSweepAlwaysCompactCount, 4, \ 3239 "How often should we fully compact the heap (ignoring the dead " \ 3240 "space parameters)") \ 3241 range(1, max_juint) \ 3242 \ 3243 develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 3244 "Delay between expansion and allocation (in milliseconds)") \ 3245 \ 3246 develop(uintx, GCWorkerDelayMillis, 0, \ 3247 "Delay in scheduling GC workers (in milliseconds)") \ 3248 \ 3249 product(intx, DeferThrSuspendLoopCount, 4000, \ 3250 "(Unstable) Number of times to iterate in safepoint loop " \ 3251 "before blocking VM threads ") \ 3252 range(-1, max_jint-1) \ 3253 \ 3254 product(intx, DeferPollingPageLoopCount, -1, \ 3255 "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 3256 "before changing safepoint polling page to RO ") \ 3257 range(-1, max_jint-1) \ 3258 \ 3259 product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 3260 range(0, max_intx) \ 3261 \ 3262 product(bool, PSChunkLargeArrays, true, \ 3263 "Process large arrays in chunks") \ 3264 \ 3265 product(uintx, GCDrainStackTargetSize, 64, \ 3266 "Number of entries we will try to leave on the stack " \ 3267 "during parallel gc") \ 3268 range(0, max_juint) \ 3269 \ 3270 /* stack parameters */ \ 3271 product_pd(intx, StackYellowPages, \ 3272 "Number of yellow zone (recoverable overflows) pages of size " \ 3273 "4KB. If pages are bigger yellow zone is aligned up.") \ 3274 range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5)) \ 3275 \ 3276 product_pd(intx, StackRedPages, \ 3277 "Number of red zone (unrecoverable overflows) pages of size " \ 3278 "4KB. If pages are bigger red zone is aligned up.") \ 3279 range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \ 3280 \ 3281 product_pd(intx, StackReservedPages, \ 3282 "Number of reserved zone (reserved to annotated methods) pages" \ 3283 " of size 4KB. 
If pages are bigger reserved zone is aligned up.") \ 3284 range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\ 3285 \ 3286 product(bool, RestrictReservedStack, true, \ 3287 "Restrict @ReservedStackAccess to trusted classes") \ 3288 \ 3289 /* greater stack shadow pages can't generate instruction to bang stack */ \ 3290 product_pd(intx, StackShadowPages, \ 3291 "Number of shadow zone (for overflow checking) pages of size " \ 3292 "4KB. If pages are bigger shadow zone is aligned up. " \ 3293 "This should exceed the depth of the VM and native call stack.") \ 3294 range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30)) \ 3295 \ 3296 product_pd(intx, ThreadStackSize, \ 3297 "Thread Stack Size (in Kbytes)") \ 3298 range(0, (max_intx-os::vm_page_size())/(1 * K)) \ 3299 \ 3300 product_pd(intx, VMThreadStackSize, \ 3301 "Non-Java Thread Stack Size (in Kbytes)") \ 3302 range(0, max_intx/(1 * K)) \ 3303 \ 3304 product_pd(intx, CompilerThreadStackSize, \ 3305 "Compiler Thread Stack Size (in Kbytes)") \ 3306 range(0, max_intx/(1 * K)) \ 3307 \ 3308 develop_pd(size_t, JVMInvokeMethodSlack, \ 3309 "Stack space (bytes) required for JVM_InvokeMethod to complete") \ 3310 \ 3311 /* code cache parameters */ \ 3312 develop_pd(uintx, CodeCacheSegmentSize, \ 3313 "Code cache segment size (in bytes) - smallest unit of " \ 3314 "allocation") \ 3315 range(1, 1024) \ 3316 constraint(CodeCacheSegmentSizeConstraintFunc, AfterErgo) \ 3317 \ 3318 develop_pd(intx, CodeEntryAlignment, \ 3319 "Code entry alignment for generated code (in bytes)") \ 3320 constraint(CodeEntryAlignmentConstraintFunc, AfterErgo) \ 3321 \ 3322 product_pd(intx, OptoLoopAlignment, \ 3323 "Align inner loops to zero relative to this modulus") \ 3324 range(1, 16) \ 3325 constraint(OptoLoopAlignmentConstraintFunc, AfterErgo) \ 3326 \ 3327 product_pd(uintx, InitialCodeCacheSize, \ 3328 "Initial code cache size (in bytes)") \ 3329 range(0, max_uintx) \ 3330 \ 3331 develop_pd(uintx, CodeCacheMinimumUseSpace, 
\ 3332 "Minimum code cache size (in bytes) required to start VM.") \ 3333 range(0, max_uintx) \ 3334 \ 3335 product(bool, SegmentedCodeCache, false, \ 3336 "Use a segmented code cache") \ 3337 \ 3338 product_pd(uintx, ReservedCodeCacheSize, \ 3339 "Reserved code cache size (in bytes) - maximum code cache size") \ 3340 range(0, max_uintx) \ 3341 \ 3342 product_pd(uintx, NonProfiledCodeHeapSize, \ 3343 "Size of code heap with non-profiled methods (in bytes)") \ 3344 range(0, max_uintx) \ 3345 \ 3346 product_pd(uintx, ProfiledCodeHeapSize, \ 3347 "Size of code heap with profiled methods (in bytes)") \ 3348 range(0, max_uintx) \ 3349 \ 3350 product_pd(uintx, NonNMethodCodeHeapSize, \ 3351 "Size of code heap with non-nmethods (in bytes)") \ 3352 range(0, max_uintx) \ 3353 \ 3354 product_pd(uintx, CodeCacheExpansionSize, \ 3355 "Code cache expansion size (in bytes)") \ 3356 range(0, max_uintx) \ 3357 \ 3358 diagnostic_pd(uintx, CodeCacheMinBlockLength, \ 3359 "Minimum number of segments in a code cache block") \ 3360 range(1, 100) \ 3361 \ 3362 notproduct(bool, ExitOnFullCodeCache, false, \ 3363 "Exit the VM if we fill the code cache") \ 3364 \ 3365 product(bool, UseCodeCacheFlushing, true, \ 3366 "Remove cold/old nmethods from the code cache") \ 3367 \ 3368 product(uintx, StartAggressiveSweepingAt, 10, \ 3369 "Start aggressive sweeping if X[%] of the code cache is free." \ 3370 "Segmented code cache: X[%] of the non-profiled heap." 
\ 3371 "Non-segmented code cache: X[%] of the total code cache") \ 3372 range(0, 100) \ 3373 \ 3374 /* AOT parameters */ \ 3375 product(bool, UseAOT, AOT_ONLY(true) NOT_AOT(false), \ 3376 "Use AOT compiled files") \ 3377 \ 3378 product(ccstrlist, AOTLibrary, NULL, \ 3379 "AOT library") \ 3380 \ 3381 product(bool, PrintAOT, false, \ 3382 "Print used AOT klasses and methods") \ 3383 \ 3384 notproduct(bool, PrintAOTStatistics, false, \ 3385 "Print AOT statistics") \ 3386 \ 3387 diagnostic(bool, UseAOTStrictLoading, false, \ 3388 "Exit the VM if any of the AOT libraries has invalid config") \ 3389 \ 3390 product(bool, CalculateClassFingerprint, false, \ 3391 "Calculate class fingerprint") \ 3392 \ 3393 /* interpreter debugging */ \ 3394 develop(intx, BinarySwitchThreshold, 5, \ 3395 "Minimal number of lookupswitch entries for rewriting to binary " \ 3396 "switch") \ 3397 \ 3398 develop(intx, StopInterpreterAt, 0, \ 3399 "Stop interpreter execution at specified bytecode number") \ 3400 \ 3401 develop(intx, TraceBytecodesAt, 0, \ 3402 "Trace bytecodes starting with specified bytecode number") \ 3403 \ 3404 /* compiler interface */ \ 3405 develop(intx, CIStart, 0, \ 3406 "The id of the first compilation to permit") \ 3407 \ 3408 develop(intx, CIStop, max_jint, \ 3409 "The id of the last compilation to permit") \ 3410 \ 3411 develop(intx, CIStartOSR, 0, \ 3412 "The id of the first osr compilation to permit " \ 3413 "(CICountOSR must be on)") \ 3414 \ 3415 develop(intx, CIStopOSR, max_jint, \ 3416 "The id of the last osr compilation to permit " \ 3417 "(CICountOSR must be on)") \ 3418 \ 3419 develop(intx, CIBreakAtOSR, -1, \ 3420 "The id of osr compilation to break at") \ 3421 \ 3422 develop(intx, CIBreakAt, -1, \ 3423 "The id of compilation to break at") \ 3424 \ 3425 product(ccstrlist, CompileOnly, "", \ 3426 "List of methods (pkg/class.name) to restrict compilation to") \ 3427 \ 3428 product(ccstr, CompileCommandFile, NULL, \ 3429 "Read compiler commands from this file 
[.hotspot_compiler]") \ 3430 \ 3431 diagnostic(ccstr, CompilerDirectivesFile, NULL, \ 3432 "Read compiler directives from this file") \ 3433 \ 3434 product(ccstrlist, CompileCommand, "", \ 3435 "Prepend to .hotspot_compiler; e.g. log,java/lang/String.<init>") \ 3436 \ 3437 develop(bool, ReplayCompiles, false, \ 3438 "Enable replay of compilations from ReplayDataFile") \ 3439 \ 3440 product(ccstr, ReplayDataFile, NULL, \ 3441 "File containing compilation replay information" \ 3442 "[default: ./replay_pid%p.log] (%p replaced with pid)") \ 3443 \ 3444 product(ccstr, InlineDataFile, NULL, \ 3445 "File containing inlining replay information" \ 3446 "[default: ./inline_pid%p.log] (%p replaced with pid)") \ 3447 \ 3448 develop(intx, ReplaySuppressInitializers, 2, \ 3449 "Control handling of class initialization during replay: " \ 3450 "0 - don't do anything special; " \ 3451 "1 - treat all class initializers as empty; " \ 3452 "2 - treat class initializers for application classes as empty; " \ 3453 "3 - allow all class initializers to run during bootstrap but " \ 3454 " pretend they are empty after starting replay") \ 3455 range(0, 3) \ 3456 \ 3457 develop(bool, ReplayIgnoreInitErrors, false, \ 3458 "Ignore exceptions thrown during initialization for replay") \ 3459 \ 3460 product(bool, DumpReplayDataOnError, true, \ 3461 "Record replay data for crashing compiler threads") \ 3462 \ 3463 product(bool, CICompilerCountPerCPU, false, \ 3464 "1 compiler thread for log(N CPUs)") \ 3465 \ 3466 develop(intx, CIFireOOMAt, -1, \ 3467 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ 3468 "(non-negative value throws OOM after this many CI accesses " \ 3469 "in each compile)") \ 3470 notproduct(intx, CICrashAt, -1, \ 3471 "id of compilation to trigger assert in compiler thread for " \ 3472 "the purpose of testing, e.g. 
generation of replay data") \ 3473 notproduct(bool, CIObjectFactoryVerify, false, \ 3474 "enable potentially expensive verification in ciObjectFactory") \ 3475 \ 3476 /* Priorities */ \ 3477 product_pd(bool, UseThreadPriorities, "Use native thread priorities") \ 3478 \ 3479 product(intx, ThreadPriorityPolicy, 0, \ 3480 "0 : Normal. "\ 3481 " VM chooses priorities that are appropriate for normal "\ 3482 " applications. On Solaris NORM_PRIORITY and above are mapped "\ 3483 " to normal native priority. Java priorities below " \ 3484 " NORM_PRIORITY map to lower native priority values. On "\ 3485 " Windows applications are allowed to use higher native "\ 3486 " priorities. However, with ThreadPriorityPolicy=0, VM will "\ 3487 " not use the highest possible native priority, "\ 3488 " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ 3489 " system threads. On Linux thread priorities are ignored "\ 3490 " because the OS does not support static priority in "\ 3491 " SCHED_OTHER scheduling class which is the only choice for "\ 3492 " non-root, non-realtime applications. "\ 3493 "1 : Aggressive. "\ 3494 " Java thread priorities map over to the entire range of "\ 3495 " native thread priorities. Higher Java thread priorities map "\ 3496 " to higher native thread priorities. This policy should be "\ 3497 " used with care, as sometimes it can cause performance "\ 3498 " degradation in the application and/or the entire system. 
On "\ 3499 " Linux this policy requires root privilege.") \ 3500 range(0, 1) \ 3501 \ 3502 product(bool, ThreadPriorityVerbose, false, \ 3503 "Print priority changes") \ 3504 \ 3505 product(intx, CompilerThreadPriority, -1, \ 3506 "The native priority at which compiler threads should run " \ 3507 "(-1 means no change)") \ 3508 range(min_jint, max_jint) \ 3509 constraint(CompilerThreadPriorityConstraintFunc, AfterErgo) \ 3510 \ 3511 product(intx, VMThreadPriority, -1, \ 3512 "The native priority at which the VM thread should run " \ 3513 "(-1 means no change)") \ 3514 range(-1, 127) \ 3515 \ 3516 product(bool, CompilerThreadHintNoPreempt, true, \ 3517 "(Solaris only) Give compiler threads an extra quanta") \ 3518 \ 3519 product(bool, VMThreadHintNoPreempt, false, \ 3520 "(Solaris only) Give VM thread an extra quanta") \ 3521 \ 3522 product(intx, JavaPriority1_To_OSPriority, -1, \ 3523 "Map Java priorities to OS priorities") \ 3524 range(-1, 127) \ 3525 \ 3526 product(intx, JavaPriority2_To_OSPriority, -1, \ 3527 "Map Java priorities to OS priorities") \ 3528 range(-1, 127) \ 3529 \ 3530 product(intx, JavaPriority3_To_OSPriority, -1, \ 3531 "Map Java priorities to OS priorities") \ 3532 range(-1, 127) \ 3533 \ 3534 product(intx, JavaPriority4_To_OSPriority, -1, \ 3535 "Map Java priorities to OS priorities") \ 3536 range(-1, 127) \ 3537 \ 3538 product(intx, JavaPriority5_To_OSPriority, -1, \ 3539 "Map Java priorities to OS priorities") \ 3540 range(-1, 127) \ 3541 \ 3542 product(intx, JavaPriority6_To_OSPriority, -1, \ 3543 "Map Java priorities to OS priorities") \ 3544 range(-1, 127) \ 3545 \ 3546 product(intx, JavaPriority7_To_OSPriority, -1, \ 3547 "Map Java priorities to OS priorities") \ 3548 range(-1, 127) \ 3549 \ 3550 product(intx, JavaPriority8_To_OSPriority, -1, \ 3551 "Map Java priorities to OS priorities") \ 3552 range(-1, 127) \ 3553 \ 3554 product(intx, JavaPriority9_To_OSPriority, -1, \ 3555 "Map Java priorities to OS priorities") \ 3556 range(-1, 127) 
\ 3557 \ 3558 product(intx, JavaPriority10_To_OSPriority,-1, \ 3559 "Map Java priorities to OS priorities") \ 3560 range(-1, 127) \ 3561 \ 3562 experimental(bool, UseCriticalJavaThreadPriority, false, \ 3563 "Java thread priority 10 maps to critical scheduling priority") \ 3564 \ 3565 experimental(bool, UseCriticalCompilerThreadPriority, false, \ 3566 "Compiler thread(s) run at critical scheduling priority") \ 3567 \ 3568 experimental(bool, UseCriticalCMSThreadPriority, false, \ 3569 "ConcurrentMarkSweep thread runs at critical scheduling priority")\ 3570 \ 3571 /* compiler debugging */ \ 3572 notproduct(intx, CompileTheWorldStartAt, 1, \ 3573 "First class to consider when using +CompileTheWorld") \ 3574 \ 3575 notproduct(intx, CompileTheWorldStopAt, max_jint, \ 3576 "Last class to consider when using +CompileTheWorld") \ 3577 \ 3578 develop(intx, NewCodeParameter, 0, \ 3579 "Testing Only: Create a dedicated integer parameter before " \ 3580 "putback") \ 3581 \ 3582 /* new oopmap storage allocation */ \ 3583 develop(intx, MinOopMapAllocation, 8, \ 3584 "Minimum number of OopMap entries in an OopMapSet") \ 3585 \ 3586 /* Background Compilation */ \ 3587 develop(intx, LongCompileThreshold, 50, \ 3588 "Used with +TraceLongCompiles") \ 3589 \ 3590 /* recompilation */ \ 3591 product_pd(intx, CompileThreshold, \ 3592 "number of interpreted method invocations before (re-)compiling") \ 3593 constraint(CompileThresholdConstraintFunc, AfterErgo) \ 3594 \ 3595 product(double, CompileThresholdScaling, 1.0, \ 3596 "Factor to control when first compilation happens " \ 3597 "(both with and without tiered compilation): " \ 3598 "values greater than 1.0 delay counter overflow, " \ 3599 "values between 0 and 1.0 rush counter overflow, " \ 3600 "value of 1.0 leaves compilation thresholds unchanged " \ 3601 "value of 0.0 is equivalent to -Xint. " \ 3602 "" \ 3603 "Flag can be set as per-method option. 
" \ 3604 "If a value is specified for a method, compilation thresholds " \ 3605 "for that method are scaled by both the value of the global flag "\ 3606 "and the value of the per-method flag.") \ 3607 range(0.0, DBL_MAX) \ 3608 \ 3609 product(intx, Tier0InvokeNotifyFreqLog, 7, \ 3610 "Interpreter (tier 0) invocation notification frequency") \ 3611 range(0, 30) \ 3612 \ 3613 product(intx, Tier2InvokeNotifyFreqLog, 11, \ 3614 "C1 without MDO (tier 2) invocation notification frequency") \ 3615 range(0, 30) \ 3616 \ 3617 product(intx, Tier3InvokeNotifyFreqLog, 10, \ 3618 "C1 with MDO profiling (tier 3) invocation notification " \ 3619 "frequency") \ 3620 range(0, 30) \ 3621 \ 3622 product(intx, Tier23InlineeNotifyFreqLog, 20, \ 3623 "Inlinee invocation (tiers 2 and 3) notification frequency") \ 3624 range(0, 30) \ 3625 \ 3626 product(intx, Tier0BackedgeNotifyFreqLog, 10, \ 3627 "Interpreter (tier 0) invocation notification frequency") \ 3628 range(0, 30) \ 3629 \ 3630 product(intx, Tier2BackedgeNotifyFreqLog, 14, \ 3631 "C1 without MDO (tier 2) invocation notification frequency") \ 3632 range(0, 30) \ 3633 \ 3634 product(intx, Tier3BackedgeNotifyFreqLog, 13, \ 3635 "C1 with MDO profiling (tier 3) invocation notification " \ 3636 "frequency") \ 3637 range(0, 30) \ 3638 \ 3639 product(intx, Tier2CompileThreshold, 0, \ 3640 "threshold at which tier 2 compilation is invoked") \ 3641 range(0, max_jint) \ 3642 \ 3643 product(intx, Tier2BackEdgeThreshold, 0, \ 3644 "Back edge threshold at which tier 2 compilation is invoked") \ 3645 range(0, max_jint) \ 3646 \ 3647 product(intx, Tier3InvocationThreshold, 200, \ 3648 "Compile if number of method invocations crosses this " \ 3649 "threshold") \ 3650 range(0, max_jint) \ 3651 \ 3652 product(intx, Tier3MinInvocationThreshold, 100, \ 3653 "Minimum invocation to compile at tier 3") \ 3654 range(0, max_jint) \ 3655 \ 3656 product(intx, Tier3CompileThreshold, 2000, \ 3657 "Threshold at which tier 3 compilation is invoked (invocation 
" \ 3658 "minimum must be satisfied)") \ 3659 range(0, max_jint) \ 3660 \ 3661 product(intx, Tier3BackEdgeThreshold, 60000, \ 3662 "Back edge threshold at which tier 3 OSR compilation is invoked") \ 3663 range(0, max_jint) \ 3664 \ 3665 product(intx, Tier3AOTInvocationThreshold, 10000, \ 3666 "Compile if number of method invocations crosses this " \ 3667 "threshold if coming from AOT") \ 3668 range(0, max_jint) \ 3669 \ 3670 product(intx, Tier3AOTMinInvocationThreshold, 1000, \ 3671 "Minimum invocation to compile at tier 3 if coming from AOT") \ 3672 range(0, max_jint) \ 3673 \ 3674 product(intx, Tier3AOTCompileThreshold, 15000, \ 3675 "Threshold at which tier 3 compilation is invoked (invocation " \ 3676 "minimum must be satisfied) if coming from AOT") \ 3677 range(0, max_jint) \ 3678 \ 3679 product(intx, Tier3AOTBackEdgeThreshold, 120000, \ 3680 "Back edge threshold at which tier 3 OSR compilation is invoked " \ 3681 "if coming from AOT") \ 3682 range(0, max_jint) \ 3683 \ 3684 product(intx, Tier4InvocationThreshold, 5000, \ 3685 "Compile if number of method invocations crosses this " \ 3686 "threshold") \ 3687 range(0, max_jint) \ 3688 \ 3689 product(intx, Tier4MinInvocationThreshold, 600, \ 3690 "Minimum invocation to compile at tier 4") \ 3691 range(0, max_jint) \ 3692 \ 3693 product(intx, Tier4CompileThreshold, 15000, \ 3694 "Threshold at which tier 4 compilation is invoked (invocation " \ 3695 "minimum must be satisfied") \ 3696 range(0, max_jint) \ 3697 \ 3698 product(intx, Tier4BackEdgeThreshold, 40000, \ 3699 "Back edge threshold at which tier 4 OSR compilation is invoked") \ 3700 range(0, max_jint) \ 3701 \ 3702 product(intx, Tier3DelayOn, 5, \ 3703 "If C2 queue size grows over this amount per compiler thread " \ 3704 "stop compiling at tier 3 and start compiling at tier 2") \ 3705 range(0, max_jint) \ 3706 \ 3707 product(intx, Tier3DelayOff, 2, \ 3708 "If C2 queue size is less than this amount per compiler thread " \ 3709 "allow methods compiled at tier 
2 transition to tier 3") \ 3710 range(0, max_jint) \ 3711 \ 3712 product(intx, Tier3LoadFeedback, 5, \ 3713 "Tier 3 thresholds will increase twofold when C1 queue size " \ 3714 "reaches this amount per compiler thread") \ 3715 range(0, max_jint) \ 3716 \ 3717 product(intx, Tier4LoadFeedback, 3, \ 3718 "Tier 4 thresholds will increase twofold when C2 queue size " \ 3719 "reaches this amount per compiler thread") \ 3720 range(0, max_jint) \ 3721 \ 3722 product(intx, TieredCompileTaskTimeout, 50, \ 3723 "Kill compile task if method was not used within " \ 3724 "given timeout in milliseconds") \ 3725 range(0, max_intx) \ 3726 \ 3727 product(intx, TieredStopAtLevel, 4, \ 3728 "Stop at given compilation level") \ 3729 range(0, 4) \ 3730 \ 3731 product(intx, Tier0ProfilingStartPercentage, 200, \ 3732 "Start profiling in interpreter if the counters exceed tier 3 " \ 3733 "thresholds by the specified percentage") \ 3734 range(0, max_jint) \ 3735 \ 3736 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ 3737 "Increase the compile threshold for C1 compilation if the code " \ 3738 "cache is filled by the specified percentage") \ 3739 range(0, 99) \ 3740 \ 3741 product(intx, TieredRateUpdateMinTime, 1, \ 3742 "Minimum rate sampling interval (in milliseconds)") \ 3743 range(0, max_intx) \ 3744 \ 3745 product(intx, TieredRateUpdateMaxTime, 25, \ 3746 "Maximum rate sampling interval (in milliseconds)") \ 3747 range(0, max_intx) \ 3748 \ 3749 product_pd(bool, TieredCompilation, \ 3750 "Enable tiered compilation") \ 3751 \ 3752 product(bool, PrintTieredEvents, false, \ 3753 "Print tiered events notifications") \ 3754 \ 3755 product_pd(intx, OnStackReplacePercentage, \ 3756 "NON_TIERED number of method invocations/branches (expressed as " \ 3757 "% of CompileThreshold) before (re-)compiling OSR code") \ 3758 constraint(OnStackReplacePercentageConstraintFunc, AfterErgo) \ 3759 \ 3760 product(intx, InterpreterProfilePercentage, 33, \ 3761 "NON_TIERED number of method 
invocations/branches (expressed as " \ 3762 "% of CompileThreshold) before profiling in the interpreter") \ 3763 range(0, 100) \ 3764 \ 3765 develop(intx, MaxRecompilationSearchLength, 10, \ 3766 "The maximum number of frames to inspect when searching for " \ 3767 "recompilee") \ 3768 \ 3769 develop(intx, MaxInterpretedSearchLength, 3, \ 3770 "The maximum number of interpreted frames to skip when searching "\ 3771 "for recompilee") \ 3772 \ 3773 develop(intx, DesiredMethodLimit, 8000, \ 3774 "The desired maximum method size (in bytecodes) after inlining") \ 3775 \ 3776 develop(intx, HugeMethodLimit, 8000, \ 3777 "Don't compile methods larger than this if " \ 3778 "+DontCompileHugeMethods") \ 3779 \ 3780 /* New JDK 1.4 reflection implementation */ \ 3781 \ 3782 develop(intx, FastSuperclassLimit, 8, \ 3783 "Depth of hardwired instanceof accelerator array") \ 3784 \ 3785 /* Properties for Java libraries */ \ 3786 \ 3787 product(uint64_t, MaxDirectMemorySize, 0, \ 3788 "Maximum total size of NIO direct-buffer allocations") \ 3789 range(0, max_jlong) \ 3790 \ 3791 /* Flags used for temporary code during development */ \ 3792 \ 3793 diagnostic(bool, UseNewCode, false, \ 3794 "Testing Only: Use the new version while testing") \ 3795 \ 3796 diagnostic(bool, UseNewCode2, false, \ 3797 "Testing Only: Use the new version while testing") \ 3798 \ 3799 diagnostic(bool, UseNewCode3, false, \ 3800 "Testing Only: Use the new version while testing") \ 3801 \ 3802 /* flags for performance data collection */ \ 3803 \ 3804 product(bool, UsePerfData, true, \ 3805 "Flag to disable jvmstat instrumentation for performance testing "\ 3806 "and problem isolation purposes") \ 3807 \ 3808 product(bool, PerfDataSaveToFile, false, \ 3809 "Save PerfData memory to hsperfdata_<pid> file on exit") \ 3810 \ 3811 product(ccstr, PerfDataSaveFile, NULL, \ 3812 "Save PerfData memory to the specified absolute pathname. 
" \ 3813 "The string %p in the file name (if present) " \ 3814 "will be replaced by pid") \ 3815 \ 3816 product(intx, PerfDataSamplingInterval, 50, \ 3817 "Data sampling interval (in milliseconds)") \ 3818 range(PeriodicTask::min_interval, max_jint) \ 3819 constraint(PerfDataSamplingIntervalFunc, AfterErgo) \ 3820 \ 3821 product(bool, PerfDisableSharedMem, false, \ 3822 "Store performance data in standard memory") \ 3823 \ 3824 product(intx, PerfDataMemorySize, 32*K, \ 3825 "Size of performance data memory region. Will be rounded " \ 3826 "up to a multiple of the native os page size.") \ 3827 range(128, 32*64*K) \ 3828 \ 3829 product(intx, PerfMaxStringConstLength, 1024, \ 3830 "Maximum PerfStringConstant string length before truncation") \ 3831 range(32, 32*K) \ 3832 \ 3833 product(bool, PerfAllowAtExitRegistration, false, \ 3834 "Allow registration of atexit() methods") \ 3835 \ 3836 product(bool, PerfBypassFileSystemCheck, false, \ 3837 "Bypass Win32 file system criteria checks (Windows Only)") \ 3838 \ 3839 product(intx, UnguardOnExecutionViolation, 0, \ 3840 "Unguard page and retry on no-execute fault (Win32 only) " \ 3841 "0=off, 1=conservative, 2=aggressive") \ 3842 range(0, 2) \ 3843 \ 3844 /* Serviceability Support */ \ 3845 \ 3846 product(bool, ManagementServer, false, \ 3847 "Create JMX Management Server") \ 3848 \ 3849 product(bool, DisableAttachMechanism, false, \ 3850 "Disable mechanism that allows tools to attach to this VM") \ 3851 \ 3852 product(bool, StartAttachListener, false, \ 3853 "Always start Attach Listener at VM startup") \ 3854 \ 3855 product(bool, EnableDynamicAgentLoading, true, \ 3856 "Allow tools to load agents with the attach mechanism") \ 3857 \ 3858 manageable(bool, PrintConcurrentLocks, false, \ 3859 "Print java.util.concurrent locks in thread dump") \ 3860 \ 3861 product(bool, TransmitErrorReport, false, \ 3862 "Enable error report transmission on erroneous termination") \ 3863 \ 3864 product(ccstr, ErrorReportServer, NULL, \ 
3865 "Override built-in error report server address") \ 3866 \ 3867 /* Shared spaces */ \ 3868 \ 3869 product(bool, UseSharedSpaces, true, \ 3870 "Use shared spaces for metadata") \ 3871 \ 3872 product(bool, VerifySharedSpaces, false, \ 3873 "Verify shared spaces (false for default archive, true for " \ 3874 "archive specified by -XX:SharedArchiveFile)") \ 3875 \ 3876 product(bool, RequireSharedSpaces, false, \ 3877 "Require shared spaces for metadata") \ 3878 \ 3879 product(bool, DumpSharedSpaces, false, \ 3880 "Special mode: JVM reads a class list, loads classes, builds " \ 3881 "shared spaces, and dumps the shared spaces to a file to be " \ 3882 "used in future JVM runs") \ 3883 \ 3884 product(bool, PrintSharedArchiveAndExit, false, \ 3885 "Print shared archive file contents") \ 3886 \ 3887 product(bool, PrintSharedDictionary, false, \ 3888 "If PrintSharedArchiveAndExit is true, also print the shared " \ 3889 "dictionary") \ 3890 \ 3891 product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE, \ 3892 "Size of read-write space for metadata (in bytes)") \ 3893 range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE) \ 3894 constraint(SharedReadWriteSizeConstraintFunc,AfterErgo) \ 3895 \ 3896 product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE, \ 3897 "Size of read-only space for metadata (in bytes)") \ 3898 range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE) \ 3899 constraint(SharedReadOnlySizeConstraintFunc,AfterErgo) \ 3900 \ 3901 product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE, \ 3902 "Size of the shared miscellaneous data area (in bytes)") \ 3903 range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE) \ 3904 constraint(SharedMiscDataSizeConstraintFunc,AfterErgo) \ 3905 \ 3906 product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE, \ 3907 "Size of the shared miscellaneous code area (in bytes)") \ 3908 range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE) \ 3909 
constraint(SharedMiscCodeSizeConstraintFunc,AfterErgo) \ 3910 \ 3911 product(size_t, SharedBaseAddress, LP64_ONLY(32*G) \ 3912 NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \ 3913 "Address to allocate shared memory region for class data") \ 3914 range(0, SIZE_MAX) \ 3915 \ 3916 product(uintx, SharedSymbolTableBucketSize, 4, \ 3917 "Average number of symbols per bucket in shared table") \ 3918 range(2, 246) \ 3919 \ 3920 diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false, \ 3921 "Do not quit -Xshare:dump even if we encounter unverifiable " \ 3922 "classes. Just exclude them from the shared dictionary.") \ 3923 \ 3924 diagnostic(bool, PrintMethodHandleStubs, false, \ 3925 "Print generated stub code for method handles") \ 3926 \ 3927 develop(bool, TraceMethodHandles, false, \ 3928 "trace internal method handle operations") \ 3929 \ 3930 diagnostic(bool, VerifyMethodHandles, trueInDebug, \ 3931 "perform extra checks when constructing method handles") \ 3932 \ 3933 diagnostic(bool, ShowHiddenFrames, false, \ 3934 "show method handle implementation frames (usually hidden)") \ 3935 \ 3936 experimental(bool, TrustFinalNonStaticFields, false, \ 3937 "trust final non-static declarations for constant folding") \ 3938 \ 3939 diagnostic(bool, FoldStableValues, true, \ 3940 "Optimize loads from stable fields (marked w/ @Stable)") \ 3941 \ 3942 develop(bool, TraceInvokeDynamic, false, \ 3943 "trace internal invoke dynamic operations") \ 3944 \ 3945 diagnostic(bool, PauseAtStartup, false, \ 3946 "Causes the VM to pause at startup time and wait for the pause " \ 3947 "file to be removed (default: ./vm.paused.<pid>)") \ 3948 \ 3949 diagnostic(ccstr, PauseAtStartupFile, NULL, \ 3950 "The file to create and for whose removal to await when pausing " \ 3951 "at startup. 
(default: ./vm.paused.<pid>)") \ 3952 \ 3953 diagnostic(bool, PauseAtExit, false, \ 3954 "Pause and wait for keypress on exit if a debugger is attached") \ 3955 \ 3956 product(bool, ExtendedDTraceProbes, false, \ 3957 "Enable performance-impacting dtrace probes") \ 3958 \ 3959 product(bool, DTraceMethodProbes, false, \ 3960 "Enable dtrace probes for method-entry and method-exit") \ 3961 \ 3962 product(bool, DTraceAllocProbes, false, \ 3963 "Enable dtrace probes for object allocation") \ 3964 \ 3965 product(bool, DTraceMonitorProbes, false, \ 3966 "Enable dtrace probes for monitor events") \ 3967 \ 3968 product(bool, RelaxAccessControlCheck, false, \ 3969 "Relax the access control checks in the verifier") \ 3970 \ 3971 product(uintx, StringTableSize, defaultStringTableSize, \ 3972 "Number of buckets in the interned String table") \ 3973 range(minimumStringTableSize, 111*defaultStringTableSize) \ 3974 \ 3975 experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ 3976 "Number of buckets in the JVM internal Symbol table") \ 3977 range(minimumSymbolTableSize, 111*defaultSymbolTableSize) \ 3978 \ 3979 product(bool, UseStringDeduplication, false, \ 3980 "Use string deduplication") \ 3981 \ 3982 product(uintx, StringDeduplicationAgeThreshold, 3, \ 3983 "A string must reach this age (or be promoted to an old region) " \ 3984 "to be considered for deduplication") \ 3985 range(1, markOopDesc::max_age) \ 3986 \ 3987 diagnostic(bool, StringDeduplicationResizeALot, false, \ 3988 "Force table resize every time the table is scanned") \ 3989 \ 3990 diagnostic(bool, StringDeduplicationRehashALot, false, \ 3991 "Force table rehash every time the table is scanned") \ 3992 \ 3993 diagnostic(bool, WhiteBoxAPI, false, \ 3994 "Enable internal testing APIs") \ 3995 \ 3996 experimental(intx, SurvivorAlignmentInBytes, 0, \ 3997 "Default survivor space alignment in bytes") \ 3998 constraint(SurvivorAlignmentInBytesConstraintFunc,AfterErgo) \ 3999 \ 4000 product(bool , 
AllowNonVirtualCalls, false, \ 4001 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ 4002 \ 4003 product(ccstr, DumpLoadedClassList, NULL, \ 4004 "Dump the names all loaded classes, that could be stored into " \ 4005 "the CDS archive, in the specified file") \ 4006 \ 4007 product(ccstr, SharedClassListFile, NULL, \ 4008 "Override the default CDS class list") \ 4009 \ 4010 diagnostic(ccstr, SharedArchiveFile, NULL, \ 4011 "Override the default location of the CDS archive file") \ 4012 \ 4013 product(ccstr, ExtraSharedClassListFile, NULL, \ 4014 "Extra classlist for building the CDS archive file") \ 4015 \ 4016 experimental(size_t, ArrayAllocatorMallocLimit, \ 4017 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \ 4018 "Allocation less than this value will be allocated " \ 4019 "using malloc. Larger allocations will use mmap.") \ 4020 \ 4021 experimental(bool, AlwaysAtomicAccesses, false, \ 4022 "Accesses to all variables should always be atomic") \ 4023 \ 4024 product(bool, EnableTracing, false, \ 4025 "Enable event-based tracing") \ 4026 \ 4027 product(bool, UseLockedTracing, false, \ 4028 "Use locked-tracing when doing event-based tracing") \ 4029 \ 4030 diagnostic(bool, UseUnalignedAccesses, false, \ 4031 "Use unaligned memory accesses in Unsafe") \ 4032 \ 4033 product_pd(bool, PreserveFramePointer, \ 4034 "Use the FP register for holding the frame pointer " \ 4035 "and not as a general purpose register.") \ 4036 \ 4037 diagnostic(bool, CheckIntrinsics, true, \ 4038 "When a class C is loaded, check that " \ 4039 "(1) all intrinsics defined by the VM for class C are present "\ 4040 "in the loaded class file and are marked with the " \ 4041 "@HotSpotIntrinsicCandidate annotation, that " \ 4042 "(2) there is an intrinsic registered for all loaded methods " \ 4043 "that are annotated with the @HotSpotIntrinsicCandidate " \ 4044 "annotation, and that " \ 4045 "(3) no orphan methods exist for class C (i.e., methods for " \ 4046 "which the VM declares an 
intrinsic but that are not declared "\ 4047 "in the loaded class C. " \ 4048 "Check (3) is available only in debug builds.") \ 4049 \ 4050 diagnostic_pd(intx, InitArrayShortSize, \ 4051 "Threshold small size (in bytes) for clearing arrays. " \ 4052 "Anything this size or smaller may get converted to discrete " \ 4053 "scalar stores.") \ 4054 range(0, max_intx) \ 4055 constraint(InitArrayShortSizeConstraintFunc, AfterErgo) \ 4056 \ 4057 diagnostic(bool, CompilerDirectivesIgnoreCompileCommands, false, \ 4058 "Disable backwards compatibility for compile commands.") \ 4059 \ 4060 diagnostic(bool, CompilerDirectivesPrint, false, \ 4061 "Print compiler directives on installation.") \ 4062 diagnostic(int, CompilerDirectivesLimit, 50, \ 4063 "Limit on number of compiler directives.") 4064 4065 4066/* 4067 * Macros for factoring of globals 4068 */ 4069 4070// Interface macros 4071#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4072#define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name; 4073#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name; 4074#define DECLARE_PD_DIAGNOSTIC_FLAG(type, name, doc) extern "C" type name; 4075#define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name; 4076#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name; 4077#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name; 4078#ifdef PRODUCT 4079#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) const type name = value; 4080#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) const type name = pd_##name; 4081#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) const type name = value; 4082#else 4083#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name; 4084#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name; 4085#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4086#endif // PRODUCT 4087// 
// Special LP64 flags, product only needed for now.
// On 64-bit builds these behave like ordinary product flags; on 32-bit
// builds they collapse to compile-time constants fixed at the default value.
#ifdef _LP64
#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
#else
#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
#endif // _LP64

// Implementation macros: expand each flag-table entry into the global
// variable definition that backs the flag.  The pd_ ("platform-dependent")
// variants take their initial value from a per-platform pd_<name> constant
// instead of an inline default.
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name;
#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_PD_DIAGNOSTIC_FLAG(type, name, doc) type name = pd_##name;
#define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
#ifdef PRODUCT
// In product builds, develop/notproduct flags were declared as compile-time
// constants (see the DECLARE_ macros above), so no storage is emitted here.
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)
#else
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
#endif // PRODUCT
#ifdef _LP64
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
#else
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
#endif // _LP64

// Only materialize src code for range checking when required, ignore otherwise
#define IGNORE_RANGE(a, b)
// Only materialize src code for constraint checking when required, ignore otherwise
#define IGNORE_CONSTRAINT(func,type)

#define IGNORE_WRITEABLE(type)

// Expand the three flag tables with the DECLARE_* macros so that every flag
// is visible to the rest of the VM as an extern "C" variable (or, where the
// build configuration makes it constant, as a const).  The matching
// MATERIALIZE_* expansion that defines the storage lives in globals.cpp.
RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \
              DECLARE_PD_DEVELOPER_FLAG, \
              DECLARE_PRODUCT_FLAG, \
              DECLARE_PD_PRODUCT_FLAG, \
              DECLARE_DIAGNOSTIC_FLAG, \
              DECLARE_PD_DIAGNOSTIC_FLAG, \
              DECLARE_EXPERIMENTAL_FLAG, \
              DECLARE_NOTPRODUCT_FLAG, \
              DECLARE_MANAGEABLE_FLAG, \
              DECLARE_PRODUCT_RW_FLAG, \
              DECLARE_LP64_PRODUCT_FLAG, \
              IGNORE_RANGE, \
              IGNORE_CONSTRAINT, \
              IGNORE_WRITEABLE)

RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \
                 DECLARE_PD_DEVELOPER_FLAG, \
                 DECLARE_PRODUCT_FLAG, \
                 DECLARE_PD_PRODUCT_FLAG, \
                 DECLARE_DIAGNOSTIC_FLAG, \
                 DECLARE_PD_DIAGNOSTIC_FLAG, \
                 DECLARE_NOTPRODUCT_FLAG, \
                 IGNORE_RANGE, \
                 IGNORE_CONSTRAINT, \
                 IGNORE_WRITEABLE)

ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, \
           DECLARE_PRODUCT_FLAG, \
           DECLARE_DIAGNOSTIC_FLAG, \
           DECLARE_EXPERIMENTAL_FLAG, \
           DECLARE_NOTPRODUCT_FLAG, \
           IGNORE_RANGE, \
           IGNORE_CONSTRAINT, \
           IGNORE_WRITEABLE)

// Extensions

#include "runtime/globals_ext.hpp"

#endif // SHARE_VM_RUNTIME_GLOBALS_HPP