// globals.hpp revision 12013:bc2c975bc342
1169689Skan/* 2169689Skan * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3169689Skan * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4169689Skan * 5169689Skan * This code is free software; you can redistribute it and/or modify it 6169689Skan * under the terms of the GNU General Public License version 2 only, as 7169689Skan * published by the Free Software Foundation. 8169689Skan * 9169689Skan * This code is distributed in the hope that it will be useful, but WITHOUT 10169689Skan * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11169689Skan * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12169689Skan * version 2 for more details (a copy is included in the LICENSE file that 13169689Skan * accompanied this code). 14169689Skan * 15169689Skan * You should have received a copy of the GNU General Public License version 16169689Skan * 2 along with this work; if not, write to the Free Software Foundation, 17169689Skan * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18169689Skan * 19169689Skan * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20169689Skan * or visit www.oracle.com if you need additional information or have any 21169689Skan * questions. 
22169689Skan * 23169689Skan */ 24169689Skan 25169689Skan#ifndef SHARE_VM_RUNTIME_GLOBALS_HPP 26169689Skan#define SHARE_VM_RUNTIME_GLOBALS_HPP 27169689Skan 28169689Skan#include "utilities/debug.hpp" 29169689Skan#include "utilities/macros.hpp" 30169689Skan 31169689Skan#include <float.h> // for DBL_MAX 32169689Skan 33169689Skan// use this for flags that are true per default in the tiered build 34169689Skan// but false in non-tiered builds, and vice versa 35169689Skan#ifdef TIERED 36169689Skan#define trueInTiered true 37169689Skan#define falseInTiered false 38169689Skan#else 39169689Skan#define trueInTiered false 40169689Skan#define falseInTiered true 41169689Skan#endif 42169689Skan 43169689Skan#include CPU_HEADER(globals) 44169689Skan#include OS_HEADER(globals) 45169689Skan#include OS_CPU_HEADER(globals) 46169689Skan#ifdef COMPILER1 47169689Skan#include CPU_HEADER(c1_globals) 48169689Skan#include OS_HEADER(c1_globals) 49169689Skan#endif 50169689Skan#ifdef COMPILER2 51169689Skan#include CPU_HEADER(c2_globals) 52169689Skan#include OS_HEADER(c2_globals) 53169689Skan#endif 54169689Skan#ifdef SHARK 55169689Skan#ifdef ZERO 56169689Skan# include "shark_globals_zero.hpp" 57169689Skan#endif 58169689Skan#endif 59169689Skan 60169689Skan#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !INCLUDE_JVMCI 61169689Skandefine_pd_global(bool, BackgroundCompilation, false); 62169689Skandefine_pd_global(bool, UseTLAB, false); 63169689Skandefine_pd_global(bool, CICompileOSR, false); 64169689Skandefine_pd_global(bool, UseTypeProfile, false); 65169689Skandefine_pd_global(bool, UseOnStackReplacement, false); 66169689Skandefine_pd_global(bool, InlineIntrinsics, false); 67169689Skandefine_pd_global(bool, PreferInterpreterNativeStubs, true); 68169689Skandefine_pd_global(bool, ProfileInterpreter, false); 69169689Skandefine_pd_global(bool, ProfileTraps, false); 70169689Skandefine_pd_global(bool, TieredCompilation, false); 71169689Skan 72169689Skandefine_pd_global(intx, 
CompileThreshold, 0); 73169689Skan 74169689Skandefine_pd_global(intx, OnStackReplacePercentage, 0); 75169689Skandefine_pd_global(bool, ResizeTLAB, false); 76169689Skandefine_pd_global(intx, FreqInlineSize, 0); 77169689Skandefine_pd_global(size_t, NewSizeThreadIncrease, 4*K); 78169689Skandefine_pd_global(intx, InlineClassNatives, true); 79169689Skandefine_pd_global(intx, InlineUnsafeOps, true); 80169689Skandefine_pd_global(intx, InitialCodeCacheSize, 160*K); 81169689Skandefine_pd_global(intx, ReservedCodeCacheSize, 32*M); 82169689Skandefine_pd_global(intx, NonProfiledCodeHeapSize, 0); 83169689Skandefine_pd_global(intx, ProfiledCodeHeapSize, 0); 84169689Skandefine_pd_global(intx, NonNMethodCodeHeapSize, 32*M); 85169689Skan 86169689Skandefine_pd_global(intx, CodeCacheExpansionSize, 32*K); 87169689Skandefine_pd_global(intx, CodeCacheMinBlockLength, 1); 88169689Skandefine_pd_global(intx, CodeCacheMinimumUseSpace, 200*K); 89169689Skandefine_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M)); 90169689Skandefine_pd_global(bool, NeverActAsServerClassMachine, true); 91169689Skandefine_pd_global(uint64_t,MaxRAM, 1ULL*G); 92169689Skan#define CI_COMPILER_COUNT 0 93169689Skan#else 94169689Skan 95169689Skan#if defined(COMPILER2) || INCLUDE_JVMCI 96169689Skan#define CI_COMPILER_COUNT 2 97169689Skan#else 98169689Skan#define CI_COMPILER_COUNT 1 99169689Skan#endif // COMPILER2 || INCLUDE_JVMCI 100169689Skan 101169689Skan#endif // no compilers 102169689Skan 103169689Skan// string type aliases used only in this file 104169689Skantypedef const char* ccstr; 105169689Skantypedef const char* ccstrlist; // represents string arguments which accumulate 106169689Skan 107169689Skan// function type that will construct default range string 108169689Skantypedef const char* (*RangeStrFunc)(void); 109169689Skan 110169689Skanstruct Flag { 111169689Skan enum Flags { 112169689Skan // latest value origin 113169689Skan DEFAULT = 0, 114169689Skan COMMAND_LINE = 1, 115169689Skan ENVIRON_VAR = 2, 
116169689Skan CONFIG_FILE = 3, 117169689Skan MANAGEMENT = 4, 118169689Skan ERGONOMIC = 5, 119169689Skan ATTACH_ON_DEMAND = 6, 120169689Skan INTERNAL = 7, 121169689Skan 122169689Skan LAST_VALUE_ORIGIN = INTERNAL, 123169689Skan VALUE_ORIGIN_BITS = 4, 124169689Skan VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS), 125169689Skan 126169689Skan // flag kind 127169689Skan KIND_PRODUCT = 1 << 4, 128169689Skan KIND_MANAGEABLE = 1 << 5, 129169689Skan KIND_DIAGNOSTIC = 1 << 6, 130169689Skan KIND_EXPERIMENTAL = 1 << 7, 131169689Skan KIND_NOT_PRODUCT = 1 << 8, 132169689Skan KIND_DEVELOP = 1 << 9, 133169689Skan KIND_PLATFORM_DEPENDENT = 1 << 10, 134169689Skan KIND_READ_WRITE = 1 << 11, 135169689Skan KIND_C1 = 1 << 12, 136169689Skan KIND_C2 = 1 << 13, 137169689Skan KIND_ARCH = 1 << 14, 138169689Skan KIND_SHARK = 1 << 15, 139169689Skan KIND_LP64_PRODUCT = 1 << 16, 140169689Skan KIND_COMMERCIAL = 1 << 17, 141169689Skan KIND_JVMCI = 1 << 18, 142169689Skan 143169689Skan // set this bit if the flag was set on the command line 144169689Skan ORIG_COMMAND_LINE = 1 << 19, 145169689Skan 146169689Skan KIND_MASK = ~(VALUE_ORIGIN_MASK | ORIG_COMMAND_LINE) 147169689Skan }; 148169689Skan 149169689Skan enum Error { 150169689Skan // no error 151169689Skan SUCCESS = 0, 152169689Skan // flag name is missing 153169689Skan MISSING_NAME, 154169689Skan // flag value is missing 155169689Skan MISSING_VALUE, 156169689Skan // error parsing the textual form of the value 157169689Skan WRONG_FORMAT, 158169689Skan // flag is not writable 159169689Skan NON_WRITABLE, 160169689Skan // flag value is outside of its bounds 161169689Skan OUT_OF_BOUNDS, 162169689Skan // flag value violates its constraint 163169689Skan VIOLATES_CONSTRAINT, 164169689Skan // there is no flag with the given name 165169689Skan INVALID_FLAG, 166169689Skan // the flag can only be set only on command line during invocation of the VM 167169689Skan COMMAND_LINE_ONLY, 168169689Skan // the flag may only be set once 169169689Skan SET_ONLY_ONCE, 
170169689Skan // the flag is not writable in this combination of product/debug build 171169689Skan CONSTANT, 172169689Skan // other, unspecified error related to setting the flag 173169689Skan ERR_OTHER 174169689Skan }; 175169689Skan 176169689Skan enum MsgType { 177169689Skan NONE = 0, 178169689Skan DIAGNOSTIC_FLAG_BUT_LOCKED, 179169689Skan EXPERIMENTAL_FLAG_BUT_LOCKED, 180169689Skan DEVELOPER_FLAG_BUT_PRODUCT_BUILD, 181169689Skan NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD 182169689Skan }; 183169689Skan 184169689Skan const char* _type; 185169689Skan const char* _name; 186169689Skan void* _addr; 187169689Skan NOT_PRODUCT(const char* _doc;) 188169689Skan Flags _flags; 189169689Skan 190169689Skan // points to all Flags static array 191169689Skan static Flag* flags; 192169689Skan 193169689Skan // number of flags 194169689Skan static size_t numFlags; 195169689Skan 196169689Skan static Flag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); }; 197169689Skan static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false); 198169689Skan static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); 199169689Skan 200169689Skan static const char* get_int_default_range_str(); 201169689Skan static const char* get_uint_default_range_str(); 202169689Skan static const char* get_intx_default_range_str(); 203169689Skan static const char* get_uintx_default_range_str(); 204169689Skan static const char* get_uint64_t_default_range_str(); 205169689Skan static const char* get_size_t_default_range_str(); 206169689Skan static const char* get_double_default_range_str(); 207169689Skan 208169689Skan Flag::Error check_writable(bool changed); 209169689Skan 210169689Skan bool is_bool() const; 211169689Skan bool get_bool() const; 212169689Skan Flag::Error set_bool(bool value); 213169689Skan 214169689Skan bool is_int() const; 215169689Skan int get_int() const; 216169689Skan Flag::Error set_int(int value); 
217169689Skan 218169689Skan bool is_uint() const; 219169689Skan uint get_uint() const; 220169689Skan Flag::Error set_uint(uint value); 221169689Skan 222169689Skan bool is_intx() const; 223169689Skan intx get_intx() const; 224169689Skan Flag::Error set_intx(intx value); 225169689Skan 226169689Skan bool is_uintx() const; 227169689Skan uintx get_uintx() const; 228169689Skan Flag::Error set_uintx(uintx value); 229169689Skan 230169689Skan bool is_uint64_t() const; 231169689Skan uint64_t get_uint64_t() const; 232169689Skan Flag::Error set_uint64_t(uint64_t value); 233169689Skan 234169689Skan bool is_size_t() const; 235169689Skan size_t get_size_t() const; 236169689Skan Flag::Error set_size_t(size_t value); 237169689Skan 238169689Skan bool is_double() const; 239169689Skan double get_double() const; 240169689Skan Flag::Error set_double(double value); 241169689Skan 242169689Skan bool is_ccstr() const; 243169689Skan bool ccstr_accumulates() const; 244169689Skan ccstr get_ccstr() const; 245169689Skan Flag::Error set_ccstr(ccstr value); 246169689Skan 247169689Skan Flags get_origin(); 248169689Skan void set_origin(Flags origin); 249169689Skan 250169689Skan bool is_default(); 251169689Skan bool is_ergonomic(); 252169689Skan bool is_command_line(); 253169689Skan void set_command_line(); 254169689Skan 255169689Skan bool is_product() const; 256169689Skan bool is_manageable() const; 257169689Skan bool is_diagnostic() const; 258169689Skan bool is_experimental() const; 259169689Skan bool is_notproduct() const; 260169689Skan bool is_develop() const; 261169689Skan bool is_read_write() const; 262169689Skan bool is_commercial() const; 263169689Skan 264169689Skan bool is_constant_in_binary() const; 265169689Skan 266169689Skan bool is_unlocker() const; 267169689Skan bool is_unlocked() const; 268169689Skan bool is_writeable() const; 269169689Skan bool is_external() const; 270169689Skan 271169689Skan bool is_unlocker_ext() const; 272169689Skan bool is_unlocked_ext() const; 273169689Skan bool 
is_writeable_ext() const; 274169689Skan bool is_external_ext() const; 275169689Skan 276169689Skan void unlock_diagnostic(); 277169689Skan 278169689Skan Flag::MsgType get_locked_message(char*, int) const; 279169689Skan void get_locked_message_ext(char*, int) const; 280169689Skan 281169689Skan // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 282169689Skan void print_on(outputStream* st, bool withComments = false, bool printRanges = false); 283169689Skan void print_kind_and_origin(outputStream* st); 284169689Skan void print_as_flag(outputStream* st); 285169689Skan 286169689Skan static const char* flag_error_str(Flag::Error error); 287169689Skan}; 288169689Skan 289169689Skan// debug flags control various aspects of the VM and are global accessible 290169689Skan 291169689Skan// use FlagSetting to temporarily change some debug flag 292169689Skan// e.g. FlagSetting fs(DebugThisAndThat, true); 293169689Skan// restored to previous value upon leaving scope 294169689Skanclass FlagSetting { 295169689Skan bool val; 296169689Skan bool* flag; 297169689Skan public: 298169689Skan FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; } 299169689Skan ~FlagSetting() { *flag = val; } 300169689Skan}; 301169689Skan 302169689Skan 303169689Skanclass CounterSetting { 304169689Skan intx* counter; 305169689Skan public: 306169689Skan CounterSetting(intx* cnt) { counter = cnt; (*counter)++; } 307169689Skan ~CounterSetting() { (*counter)--; } 308169689Skan}; 309169689Skan 310169689Skanclass IntFlagSetting { 311169689Skan int val; 312169689Skan int* flag; 313169689Skan public: 314169689Skan IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; } 315169689Skan ~IntFlagSetting() { *flag = val; } 316169689Skan}; 317169689Skan 318169689Skanclass UIntFlagSetting { 319169689Skan uint val; 320169689Skan uint* flag; 321169689Skan public: 322169689Skan UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val 
= fl; fl = newValue; } 323169689Skan ~UIntFlagSetting() { *flag = val; } 324169689Skan}; 325169689Skan 326169689Skanclass UIntXFlagSetting { 327169689Skan uintx val; 328169689Skan uintx* flag; 329169689Skan public: 330169689Skan UIntXFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; } 331169689Skan ~UIntXFlagSetting() { *flag = val; } 332169689Skan}; 333169689Skan 334169689Skanclass DoubleFlagSetting { 335169689Skan double val; 336169689Skan double* flag; 337169689Skan public: 338169689Skan DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; } 339169689Skan ~DoubleFlagSetting() { *flag = val; } 340169689Skan}; 341169689Skan 342169689Skanclass SizeTFlagSetting { 343169689Skan size_t val; 344169689Skan size_t* flag; 345169689Skan public: 346169689Skan SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; } 347169689Skan ~SizeTFlagSetting() { *flag = val; } 348169689Skan}; 349169689Skan 350169689Skan// Helper class for temporarily saving the value of a flag during a scope. 351169689Skantemplate <size_t SIZE> 352169689Skanclass FlagGuard { 353169689Skan unsigned char _value[SIZE]; 354169689Skan void* const _addr; 355169689Skan 356169689Skan // Hide operator new, this class should only be allocated on the stack. 357169689Skan // NOTE: Cannot include memory/allocation.hpp here due to circular 358169689Skan // dependencies. 
359169689Skan void* operator new(size_t size) throw(); 360169689Skan void* operator new [](size_t size) throw(); 361169689Skan 362169689Skan public: 363169689Skan FlagGuard(void* flag_addr) : _addr(flag_addr) { 364169689Skan memcpy(_value, _addr, SIZE); 365169689Skan } 366169689Skan 367169689Skan ~FlagGuard() { 368169689Skan memcpy(_addr, _value, SIZE); 369169689Skan } 370169689Skan}; 371169689Skan 372169689Skan#define FLAG_GUARD(f) FlagGuard<sizeof(f)> f ## _guard(&f) 373169689Skan 374169689Skanclass CommandLineFlags { 375169689Skanpublic: 376169689Skan static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false); 377169689Skan static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); } 378169689Skan static Flag::Error boolAtPut(Flag* flag, bool* value, Flag::Flags origin); 379169689Skan static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin); 380169689Skan static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } 381169689Skan 382169689Skan static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false); 383169689Skan static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); } 384169689Skan static Flag::Error intAtPut(Flag* flag, int* value, Flag::Flags origin); 385169689Skan static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin); 386169689Skan static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); } 387169689Skan 388169689Skan static Flag::Error uintAt(const char* name, size_t len, 
uint* value, bool allow_locked = false, bool return_flag = false); 389169689Skan static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); } 390169689Skan static Flag::Error uintAtPut(Flag* flag, uint* value, Flag::Flags origin); 391169689Skan static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin); 392169689Skan static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); } 393169689Skan 394169689Skan static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false); 395169689Skan static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); } 396169689Skan static Flag::Error intxAtPut(Flag* flag, intx* value, Flag::Flags origin); 397169689Skan static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin); 398169689Skan static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } 399169689Skan 400169689Skan static Flag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false); 401169689Skan static Flag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); } 402169689Skan static Flag::Error uintxAtPut(Flag* flag, uintx* value, Flag::Flags origin); 403169689Skan static Flag::Error uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin); 404169689Skan static Flag::Error uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), 
value, origin); } 405169689Skan 406169689Skan static Flag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false); 407169689Skan static Flag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); } 408169689Skan static Flag::Error size_tAtPut(Flag* flag, size_t* value, Flag::Flags origin); 409169689Skan static Flag::Error size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin); 410169689Skan static Flag::Error size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); } 411169689Skan 412169689Skan static Flag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false); 413169689Skan static Flag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); } 414169689Skan static Flag::Error uint64_tAtPut(Flag* flag, uint64_t* value, Flag::Flags origin); 415169689Skan static Flag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin); 416169689Skan static Flag::Error uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } 417169689Skan 418169689Skan static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false); 419169689Skan static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); } 420169689Skan static Flag::Error doubleAtPut(Flag* flag, double* value, Flag::Flags origin); 421169689Skan static Flag::Error doubleAtPut(const char* name, 
size_t len, double* value, Flag::Flags origin); 422169689Skan static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } 423169689Skan 424169689Skan static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false); 425169689Skan static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); } 426169689Skan // Contract: Flag will make private copy of the incoming value. 427169689Skan // Outgoing value is always malloc-ed, and caller MUST call free. 428169689Skan static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin); 429169689Skan static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); } 430169689Skan 431169689Skan // Returns false if name is not a command line flag. 
432169689Skan static bool wasSetOnCmdline(const char* name, bool* value); 433169689Skan static void printSetFlags(outputStream* out); 434169689Skan 435169689Skan // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 436169689Skan static void printFlags(outputStream* out, bool withComments, bool printRanges = false); 437169689Skan 438169689Skan static void verify() PRODUCT_RETURN; 439169689Skan}; 440169689Skan 441169689Skan// use this for flags that are true by default in the debug version but 442169689Skan// false in the optimized version, and vice versa 443169689Skan#ifdef ASSERT 444169689Skan#define trueInDebug true 445169689Skan#define falseInDebug false 446169689Skan#else 447169689Skan#define trueInDebug false 448169689Skan#define falseInDebug true 449169689Skan#endif 450169689Skan 451169689Skan// use this for flags that are true per default in the product build 452169689Skan// but false in development builds, and vice versa 453169689Skan#ifdef PRODUCT 454169689Skan#define trueInProduct true 455169689Skan#define falseInProduct false 456169689Skan#else 457169689Skan#define trueInProduct false 458169689Skan#define falseInProduct true 459169689Skan#endif 460169689Skan 461169689Skan// develop flags are settable / visible only during development and are constant in the PRODUCT version 462169689Skan// product flags are always settable / visible 463169689Skan// notproduct flags are settable / visible only during development and are not declared in the PRODUCT version 464169689Skan 465169689Skan// A flag must be declared with one of the following types: 466169689Skan// bool, int, uint, intx, uintx, size_t, ccstr, double, or uint64_t. 467169689Skan// The type "ccstr" is an alias for "const char*" and is used 468169689Skan// only in this file, because the macrology requires single-token type names. 469169689Skan 470169689Skan// Note: Diagnostic options not meant for VM tuning or for product modes. 
471169689Skan// They are to be used for VM quality assurance or field diagnosis 472169689Skan// of VM bugs. They are hidden so that users will not be encouraged to 473169689Skan// try them as if they were VM ordinary execution options. However, they 474169689Skan// are available in the product version of the VM. Under instruction 475169689Skan// from support engineers, VM customers can turn them on to collect 476169689Skan// diagnostic information about VM problems. To use a VM diagnostic 477169689Skan// option, you must first specify +UnlockDiagnosticVMOptions. 478169689Skan// (This master switch also affects the behavior of -Xprintflags.) 479169689Skan// 480169689Skan// experimental flags are in support of features that are not 481169689Skan// part of the officially supported product, but are available 482169689Skan// for experimenting with. They could, for example, be performance 483169689Skan// features that may not have undergone full or rigorous QA, but which may 484169689Skan// help performance in some cases and released for experimentation 485169689Skan// by the community of users and developers. This flag also allows one to 486169689Skan// be able to build a fully supported product that nonetheless also 487169689Skan// ships with some unsupported, lightly tested, experimental features. 488169689Skan// Like the UnlockDiagnosticVMOptions flag above, there is a corresponding 489169689Skan// UnlockExperimentalVMOptions flag, which allows the control and 490169689Skan// modification of the experimental flags. 491169689Skan// 492169689Skan// Nota bene: neither diagnostic nor experimental options should be used casually, 493169689Skan// and they are not supported on production loads, except under explicit 494169689Skan// direction from support engineers. 495169689Skan// 496169689Skan// manageable flags are writeable external product flags. 
497169689Skan// They are dynamically writeable through the JDK management interface 498169689Skan// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole. 499169689Skan// These flags are external exported interface (see CCC). The list of 500169689Skan// manageable flags can be queried programmatically through the management 501169689Skan// interface. 502169689Skan// 503169689Skan// A flag can be made as "manageable" only if 504169689Skan// - the flag is defined in a CCC as an external exported interface. 505169689Skan// - the VM implementation supports dynamic setting of the flag. 506169689Skan// This implies that the VM must *always* query the flag variable 507169689Skan// and not reuse state related to the flag state at any given time. 508169689Skan// - you want the flag to be queried programmatically by the customers. 509169689Skan// 510169689Skan// product_rw flags are writeable internal product flags. 511169689Skan// They are like "manageable" flags but for internal/private use. 512169689Skan// The list of product_rw flags are internal/private flags which 513169689Skan// may be changed/removed in a future release. It can be set 514169689Skan// through the management interface to get/set value 515169689Skan// when the name of flag is supplied. 516169689Skan// 517169689Skan// A flag can be made as "product_rw" only if 518169689Skan// - the VM implementation supports dynamic setting of the flag. 519169689Skan// This implies that the VM must *always* query the flag variable 520169689Skan// and not reuse state related to the flag state at any given time. 521169689Skan// 522169689Skan// Note that when there is a need to support develop flags to be writeable, 523169689Skan// it can be done in the same way as product_rw. 
524169689Skan// 525169689Skan// range is a macro that will expand to min and max arguments for range 526169689Skan// checking code if provided - see commandLineFlagRangeList.hpp 527169689Skan// 528169689Skan// constraint is a macro that will expand to custom function call 529169689Skan// for constraint checking if provided - see commandLineFlagConstraintList.hpp 530169689Skan// 531169689Skan// writeable is a macro that controls if and how the value can change during the runtime 532169689Skan// 533169689Skan// writeable(Always) is optional and allows the flag to have its value changed 534169689Skan// without any limitations at any time 535169689Skan// 536169689Skan// writeable(Once) flag value's can be only set once during the lifetime of VM 537169689Skan// 538169689Skan// writeable(CommandLineOnly) flag value's can be only set from command line 539169689Skan// (multiple times allowed) 540169689Skan// 541169689Skan 542169689Skan 543169689Skan#define RUNTIME_FLAGS(develop, \ 544169689Skan develop_pd, \ 545169689Skan product, \ 546169689Skan product_pd, \ 547169689Skan diagnostic, \ 548169689Skan diagnostic_pd, \ 549169689Skan experimental, \ 550169689Skan notproduct, \ 551169689Skan manageable, \ 552169689Skan product_rw, \ 553169689Skan lp64_product, \ 554169689Skan range, \ 555169689Skan constraint, \ 556169689Skan writeable) \ 557169689Skan \ 558169689Skan lp64_product(bool, UseCompressedOops, false, \ 559169689Skan "Use 32-bit object references in 64-bit VM. " \ 560169689Skan "lp64_product means flag is always constant in 32 bit VM") \ 561169689Skan \ 562169689Skan lp64_product(bool, UseCompressedClassPointers, false, \ 563169689Skan "Use 32-bit class pointers in 64-bit VM. 
" \ 564169689Skan "lp64_product means flag is always constant in 32 bit VM") \ 565169689Skan \ 566169689Skan notproduct(bool, CheckCompressedOops, true, \ 567169689Skan "Generate checks in encoding/decoding code in debug VM") \ 568169689Skan \ 569169689Skan product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \ 570169689Skan "Heap allocation steps through preferred address regions to find" \ 571169689Skan " where it can allocate the heap. Number of steps to take per " \ 572169689Skan "region.") \ 573169689Skan range(1, max_uintx) \ 574169689Skan \ 575169689Skan lp64_product(intx, ObjectAlignmentInBytes, 8, \ 576169689Skan "Default object alignment in bytes, 8 is minimum") \ 577169689Skan range(8, 256) \ 578169689Skan constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \ 579169689Skan \ 580169689Skan product(bool, AssumeMP, false, \ 581169689Skan "Instruct the VM to assume multiple processors are available") \ 582169689Skan \ 583169689Skan /* UseMembar is theoretically a temp flag used for memory barrier */ \ 584169689Skan /* removal testing. 
It was supposed to be removed before FCS but has */ \ 585169689Skan /* been re-added (see 6401008) */ \ 586169689Skan product_pd(bool, UseMembar, \ 587169689Skan "(Unstable) Issues membars on thread state transitions") \ 588169689Skan \ 589169689Skan develop(bool, CleanChunkPoolAsync, true, \ 590169689Skan "Clean the chunk pool asynchronously") \ 591169689Skan \ 592169689Skan experimental(bool, AlwaysSafeConstructors, false, \ 593169689Skan "Force safe construction, as if all fields are final.") \ 594169689Skan \ 595169689Skan diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \ 596169689Skan "Enable normal processing of flags relating to field diagnostics")\ 597169689Skan \ 598169689Skan experimental(bool, UnlockExperimentalVMOptions, false, \ 599169689Skan "Enable normal processing of flags relating to experimental " \ 600169689Skan "features") \ 601169689Skan \ 602169689Skan product(bool, JavaMonitorsInStackTrace, true, \ 603169689Skan "Print information about Java monitor locks when the stacks are" \ 604169689Skan "dumped") \ 605169689Skan \ 606169689Skan product_pd(bool, UseLargePages, \ 607169689Skan "Use large page memory") \ 608169689Skan \ 609169689Skan product_pd(bool, UseLargePagesIndividualAllocation, \ 610169689Skan "Allocate large pages individually for better affinity") \ 611169689Skan \ 612169689Skan develop(bool, LargePagesIndividualAllocationInjectError, false, \ 613169689Skan "Fail large pages individual allocation") \ 614169689Skan \ 615169689Skan product(bool, UseLargePagesInMetaspace, false, \ 616169689Skan "Use large page memory in metaspace. 
" \ 617169689Skan "Only used if UseLargePages is enabled.") \ 618169689Skan \ 619169689Skan product(bool, UseNUMA, false, \ 620169689Skan "Use NUMA if available") \ 621169689Skan \ 622169689Skan product(bool, UseNUMAInterleaving, false, \ 623169689Skan "Interleave memory across NUMA nodes if available") \ 624169689Skan \ 625169689Skan product(size_t, NUMAInterleaveGranularity, 2*M, \ 626169689Skan "Granularity to use for NUMA interleaving on Windows OS") \ 627169689Skan range(os::vm_allocation_granularity(), NOT_LP64(2*G) LP64_ONLY(8192*G)) \ 628169689Skan \ 629169689Skan product(bool, ForceNUMA, false, \ 630169689Skan "Force NUMA optimizations on single-node/UMA systems") \ 631169689Skan \ 632169689Skan product(uintx, NUMAChunkResizeWeight, 20, \ 633169689Skan "Percentage (0-100) used to weight the current sample when " \ 634169689Skan "computing exponentially decaying average for " \ 635169689Skan "AdaptiveNUMAChunkSizing") \ 636169689Skan range(0, 100) \ 637169689Skan \ 638169689Skan product(size_t, NUMASpaceResizeRate, 1*G, \ 639169689Skan "Do not reallocate more than this amount per collection") \ 640169689Skan range(0, max_uintx) \ 641169689Skan \ 642169689Skan product(bool, UseAdaptiveNUMAChunkSizing, true, \ 643169689Skan "Enable adaptive chunk sizing for NUMA") \ 644169689Skan \ 645169689Skan product(bool, NUMAStats, false, \ 646169689Skan "Print NUMA stats in detailed heap information") \ 647169689Skan \ 648169689Skan product(uintx, NUMAPageScanRate, 256, \ 649169689Skan "Maximum number of pages to include in the page scan procedure") \ 650169689Skan range(0, max_uintx) \ 651169689Skan \ 652169689Skan product_pd(bool, NeedsDeoptSuspend, \ 653169689Skan "True for register window machines (sparc/ia64)") \ 654169689Skan \ 655169689Skan product(intx, UseSSE, 99, \ 656169689Skan "Highest supported SSE instructions set on x86/x64") \ 657169689Skan range(0, 99) \ 658169689Skan \ 659169689Skan product(bool, UseAES, false, \ 660169689Skan "Control whether AES 
instructions can be used on x86/x64") \ 661169689Skan \ 662169689Skan product(bool, UseSHA, false, \ 663169689Skan "Control whether SHA instructions can be used " \ 664169689Skan "on SPARC, on ARM and on x86") \ 665169689Skan \ 666169689Skan diagnostic(bool, UseGHASHIntrinsics, false, \ 667169689Skan "Use intrinsics for GHASH versions of crypto") \ 668169689Skan \ 669169689Skan product(size_t, LargePageSizeInBytes, 0, \ 670169689Skan "Large page size (0 to let VM choose the page size)") \ 671169689Skan range(0, max_uintx) \ 672169689Skan \ 673169689Skan product(size_t, LargePageHeapSizeThreshold, 128*M, \ 674169689Skan "Use large pages if maximum heap is at least this big") \ 675169689Skan range(0, max_uintx) \ 676169689Skan \ 677169689Skan product(bool, ForceTimeHighResolution, false, \ 678169689Skan "Using high time resolution (for Win32 only)") \ 679169689Skan \ 680169689Skan develop(bool, TracePcPatching, false, \ 681169689Skan "Trace usage of frame::patch_pc") \ 682169689Skan \ 683169689Skan develop(bool, TraceRelocator, false, \ 684169689Skan "Trace the bytecode relocator") \ 685169689Skan \ 686169689Skan develop(bool, TraceLongCompiles, false, \ 687169689Skan "Print out every time compilation is longer than " \ 688169689Skan "a given threshold") \ 689169689Skan \ 690169689Skan develop(bool, SafepointALot, false, \ 691169689Skan "Generate a lot of safepoints. 
This works with " \ 692169689Skan "GuaranteedSafepointInterval") \ 693169689Skan \ 694169689Skan product_pd(bool, BackgroundCompilation, \ 695169689Skan "A thread requesting compilation is not blocked during " \ 696169689Skan "compilation") \ 697169689Skan \ 698169689Skan product(bool, PrintVMQWaitTime, false, \ 699169689Skan "Print out the waiting time in VM operation queue") \ 700169689Skan \ 701169689Skan develop(bool, TraceOopMapGeneration, false, \ 702169689Skan "Show OopMapGeneration") \ 703169689Skan \ 704169689Skan product(bool, MethodFlushing, true, \ 705169689Skan "Reclamation of zombie and not-entrant methods") \ 706169689Skan \ 707169689Skan develop(bool, VerifyStack, false, \ 708169689Skan "Verify stack of each thread when it is entering a runtime call") \ 709169689Skan \ 710169689Skan diagnostic(bool, ForceUnreachable, false, \ 711169689Skan "Make all non code cache addresses to be unreachable by " \ 712169689Skan "forcing use of 64bit literal fixups") \ 713169689Skan \ 714169689Skan notproduct(bool, StressDerivedPointers, false, \ 715169689Skan "Force scavenge when a derived pointer is detected on stack " \ 716169689Skan "after rtm call") \ 717169689Skan \ 718169689Skan develop(bool, TraceDerivedPointers, false, \ 719169689Skan "Trace traversal of derived pointers on stack") \ 720169689Skan \ 721169689Skan notproduct(bool, TraceCodeBlobStacks, false, \ 722169689Skan "Trace stack-walk of codeblobs") \ 723169689Skan \ 724169689Skan product(bool, PrintJNIResolving, false, \ 725169689Skan "Used to implement -v:jni") \ 726169689Skan \ 727169689Skan notproduct(bool, PrintRewrites, false, \ 728169689Skan "Print methods that are being rewritten") \ 729169689Skan \ 730169689Skan product(bool, UseInlineCaches, true, \ 731169689Skan "Use Inline Caches for virtual calls ") \ 732169689Skan \ 733169689Skan diagnostic(bool, InlineArrayCopy, true, \ 734169689Skan "Inline arraycopy native that is known to be part of " \ 735169689Skan "base library DLL") \ 
736169689Skan \ 737169689Skan diagnostic(bool, InlineObjectHash, true, \ 738169689Skan "Inline Object::hashCode() native that is known to be part " \ 739169689Skan "of base library DLL") \ 740169689Skan \ 741169689Skan diagnostic(bool, InlineNatives, true, \ 742169689Skan "Inline natives that are known to be part of base library DLL") \ 743169689Skan \ 744169689Skan diagnostic(bool, InlineMathNatives, true, \ 745169689Skan "Inline SinD, CosD, etc.") \ 746169689Skan \ 747169689Skan diagnostic(bool, InlineClassNatives, true, \ 748169689Skan "Inline Class.isInstance, etc") \ 749169689Skan \ 750169689Skan diagnostic(bool, InlineThreadNatives, true, \ 751169689Skan "Inline Thread.currentThread, etc") \ 752169689Skan \ 753169689Skan diagnostic(bool, InlineUnsafeOps, true, \ 754169689Skan "Inline memory ops (native methods) from Unsafe") \ 755169689Skan \ 756169689Skan product(bool, CriticalJNINatives, true, \ 757169689Skan "Check for critical JNI entry points") \ 758169689Skan \ 759169689Skan notproduct(bool, StressCriticalJNINatives, false, \ 760169689Skan "Exercise register saving code in critical natives") \ 761169689Skan \ 762169689Skan diagnostic(bool, UseAESIntrinsics, false, \ 763169689Skan "Use intrinsics for AES versions of crypto") \ 764169689Skan \ 765169689Skan diagnostic(bool, UseAESCTRIntrinsics, false, \ 766169689Skan "Use intrinsics for the paralleled version of AES/CTR crypto") \ 767169689Skan \ 768169689Skan diagnostic(bool, UseSHA1Intrinsics, false, \ 769169689Skan "Use intrinsics for SHA-1 crypto hash function. " \ 770169689Skan "Requires that UseSHA is enabled.") \ 771169689Skan \ 772169689Skan diagnostic(bool, UseSHA256Intrinsics, false, \ 773169689Skan "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \ 774169689Skan "Requires that UseSHA is enabled.") \ 775169689Skan \ 776169689Skan diagnostic(bool, UseSHA512Intrinsics, false, \ 777169689Skan "Use intrinsics for SHA-384 and SHA-512 crypto hash functions. 
" \ 778169689Skan "Requires that UseSHA is enabled.") \ 779169689Skan \ 780169689Skan diagnostic(bool, UseCRC32Intrinsics, false, \ 781169689Skan "use intrinsics for java.util.zip.CRC32") \ 782169689Skan \ 783169689Skan diagnostic(bool, UseCRC32CIntrinsics, false, \ 784169689Skan "use intrinsics for java.util.zip.CRC32C") \ 785169689Skan \ 786169689Skan diagnostic(bool, UseAdler32Intrinsics, false, \ 787169689Skan "use intrinsics for java.util.zip.Adler32") \ 788169689Skan \ 789169689Skan diagnostic(bool, UseVectorizedMismatchIntrinsic, false, \ 790169689Skan "Enables intrinsification of ArraysSupport.vectorizedMismatch()") \ 791169689Skan \ 792169689Skan diagnostic(ccstrlist, DisableIntrinsic, "", \ 793169689Skan "do not expand intrinsics whose (internal) names appear here") \ 794169689Skan \ 795169689Skan develop(bool, TraceCallFixup, false, \ 796169689Skan "Trace all call fixups") \ 797169689Skan \ 798169689Skan develop(bool, DeoptimizeALot, false, \ 799169689Skan "Deoptimize at every exit from the runtime system") \ 800169689Skan \ 801169689Skan notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ 802169689Skan "A comma separated list of bcis to deoptimize at") \ 803169689Skan \ 804169689Skan product(bool, DeoptimizeRandom, false, \ 805169689Skan "Deoptimize random frames on random exit from the runtime system")\ 806169689Skan \ 807169689Skan notproduct(bool, ZombieALot, false, \ 808169689Skan "Create zombies (non-entrant) at exit from the runtime system") \ 809169689Skan \ 810169689Skan product(bool, UnlinkSymbolsALot, false, \ 811169689Skan "Unlink unreferenced symbols from the symbol table at safepoints")\ 812169689Skan \ 813169689Skan notproduct(bool, WalkStackALot, false, \ 814169689Skan "Trace stack (no print) at every exit from the runtime system") \ 815169689Skan \ 816169689Skan product(bool, Debugging, false, \ 817169689Skan "Set when executing debug methods in debug.cpp " \ 818169689Skan "(to prevent triggering assertions)") \ 819169689Skan \ 820169689Skan 
notproduct(bool, StrictSafepointChecks, trueInDebug, \ 821169689Skan "Enable strict checks that safepoints cannot happen for threads " \ 822169689Skan "that use NoSafepointVerifier") \ 823169689Skan \ 824169689Skan notproduct(bool, VerifyLastFrame, false, \ 825169689Skan "Verify oops on last frame on entry to VM") \ 826169689Skan \ 827169689Skan product(bool, FailOverToOldVerifier, true, \ 828169689Skan "Fail over to old verifier when split verifier fails") \ 829169689Skan \ 830169689Skan develop(bool, ShowSafepointMsgs, false, \ 831169689Skan "Show message about safepoint synchronization") \ 832169689Skan \ 833169689Skan product(bool, SafepointTimeout, false, \ 834169689Skan "Time out and warn or fail after SafepointTimeoutDelay " \ 835169689Skan "milliseconds if failed to reach safepoint") \ 836169689Skan \ 837169689Skan develop(bool, DieOnSafepointTimeout, false, \ 838169689Skan "Die upon failure to reach safepoint (see SafepointTimeout)") \ 839169689Skan \ 840169689Skan /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \ 841169689Skan /* typically, at most a few retries are needed */ \ 842169689Skan product(intx, SuspendRetryCount, 50, \ 843169689Skan "Maximum retry count for an external suspend request") \ 844169689Skan range(0, max_intx) \ 845169689Skan \ 846169689Skan product(intx, SuspendRetryDelay, 5, \ 847169689Skan "Milliseconds to delay per retry (* current_retry_count)") \ 848169689Skan range(0, max_intx) \ 849169689Skan \ 850169689Skan product(bool, AssertOnSuspendWaitFailure, false, \ 851169689Skan "Assert/Guarantee on external suspend wait failure") \ 852169689Skan \ 853169689Skan product(bool, TraceSuspendWaitFailures, false, \ 854169689Skan "Trace external suspend wait failures") \ 855169689Skan \ 856169689Skan product(bool, MaxFDLimit, true, \ 857169689Skan "Bump the number of file descriptors to maximum in Solaris") \ 858169689Skan \ 859169689Skan diagnostic(bool, LogEvents, true, \ 860169689Skan "Enable the various ring 
buffer event logs") \ 861169689Skan \ 862169689Skan diagnostic(uintx, LogEventsBufferEntries, 10, \ 863169689Skan "Number of ring buffer event logs") \ 864169689Skan range(1, NOT_LP64(1*K) LP64_ONLY(1*M)) \ 865169689Skan \ 866169689Skan product(bool, BytecodeVerificationRemote, true, \ 867169689Skan "Enable the Java bytecode verifier for remote classes") \ 868169689Skan \ 869169689Skan product(bool, BytecodeVerificationLocal, false, \ 870169689Skan "Enable the Java bytecode verifier for local classes") \ 871169689Skan \ 872169689Skan develop(bool, ForceFloatExceptions, trueInDebug, \ 873169689Skan "Force exceptions on FP stack under/overflow") \ 874169689Skan \ 875169689Skan develop(bool, VerifyStackAtCalls, false, \ 876169689Skan "Verify that the stack pointer is unchanged after calls") \ 877169689Skan \ 878169689Skan develop(bool, TraceJavaAssertions, false, \ 879169689Skan "Trace java language assertions") \ 880169689Skan \ 881169689Skan notproduct(bool, CheckAssertionStatusDirectives, false, \ 882169689Skan "Temporary - see javaClasses.cpp") \ 883169689Skan \ 884169689Skan notproduct(bool, PrintMallocFree, false, \ 885169689Skan "Trace calls to C heap malloc/free allocation") \ 886169689Skan \ 887169689Skan notproduct(bool, VerifyCodeCache, false, \ 888169689Skan "Verify code cache on memory allocation/deallocation") \ 889169689Skan \ 890169689Skan develop(bool, UseMallocOnly, false, \ 891169689Skan "Use only malloc/free for allocation (no resource area/arena)") \ 892169689Skan \ 893169689Skan develop(bool, PrintMalloc, false, \ 894169689Skan "Print all malloc/free calls") \ 895169689Skan \ 896169689Skan develop(bool, PrintMallocStatistics, false, \ 897169689Skan "Print malloc/free statistics") \ 898169689Skan \ 899169689Skan develop(bool, ZapResourceArea, trueInDebug, \ 900169689Skan "Zap freed resource/arena space with 0xABABABAB") \ 901169689Skan \ 902169689Skan notproduct(bool, ZapVMHandleArea, trueInDebug, \ 903169689Skan "Zap freed VM handle space with 
0xBCBCBCBC") \ 904169689Skan \ 905169689Skan develop(bool, ZapJNIHandleArea, trueInDebug, \ 906169689Skan "Zap freed JNI handle space with 0xFEFEFEFE") \ 907169689Skan \ 908169689Skan notproduct(bool, ZapStackSegments, trueInDebug, \ 909169689Skan "Zap allocated/freed stack segments with 0xFADFADED") \ 910169689Skan \ 911169689Skan develop(bool, ZapUnusedHeapArea, trueInDebug, \ 912169689Skan "Zap unused heap space with 0xBAADBABE") \ 913169689Skan \ 914169689Skan develop(bool, CheckZapUnusedHeapArea, false, \ 915169689Skan "Check zapping of unused heap space") \ 916169689Skan \ 917169689Skan develop(bool, ZapFillerObjects, trueInDebug, \ 918169689Skan "Zap filler objects with 0xDEAFBABE") \ 919169689Skan \ 920169689Skan develop(bool, PrintVMMessages, true, \ 921169689Skan "Print VM messages on console") \ 922169689Skan \ 923169689Skan notproduct(uintx, ErrorHandlerTest, 0, \ 924169689Skan "If > 0, provokes an error after VM initialization; the value " \ 925169689Skan "determines which error to provoke. See test_error_handler() " \ 926169689Skan "in debug.cpp.") \ 927169689Skan \ 928169689Skan notproduct(uintx, TestCrashInErrorHandler, 0, \ 929169689Skan "If > 0, provokes an error inside VM error handler (a secondary " \ 930169689Skan "crash). 
see test_error_handler() in debug.cpp.") \ 931169689Skan \ 932169689Skan notproduct(bool, TestSafeFetchInErrorHandler, false, \ 933169689Skan "If true, tests SafeFetch inside error handler.") \ 934169689Skan \ 935169689Skan develop(bool, Verbose, false, \ 936169689Skan "Print additional debugging information from other modes") \ 937169689Skan \ 938169689Skan develop(bool, PrintMiscellaneous, false, \ 939169689Skan "Print uncategorized debugging information (requires +Verbose)") \ 940169689Skan \ 941169689Skan develop(bool, WizardMode, false, \ 942169689Skan "Print much more debugging information") \ 943169689Skan \ 944169689Skan product(bool, ShowMessageBoxOnError, false, \ 945169689Skan "Keep process alive on VM fatal error") \ 946169689Skan \ 947169689Skan product(bool, CreateCoredumpOnCrash, true, \ 948169689Skan "Create core/mini dump on VM fatal error") \ 949169689Skan \ 950169689Skan product(uint64_t, ErrorLogTimeout, 2 * 60, \ 951169689Skan "Timeout, in seconds, to limit the time spent on writing an " \ 952169689Skan "error log in case of a crash.") \ 953169689Skan range(0, (uint64_t)max_jlong/1000) \ 954169689Skan \ 955169689Skan product_pd(bool, UseOSErrorReporting, \ 956169689Skan "Let VM fatal error propagate to the OS (ie. 
WER on Windows)") \ 957169689Skan \ 958169689Skan product(bool, SuppressFatalErrorMessage, false, \ 959169689Skan "Report NO fatal error message (avoid deadlock)") \ 960169689Skan \ 961169689Skan product(ccstrlist, OnError, "", \ 962169689Skan "Run user-defined commands on fatal error; see VMError.cpp " \ 963169689Skan "for examples") \ 964169689Skan \ 965169689Skan product(ccstrlist, OnOutOfMemoryError, "", \ 966169689Skan "Run user-defined commands on first java.lang.OutOfMemoryError") \ 967169689Skan \ 968169689Skan manageable(bool, HeapDumpBeforeFullGC, false, \ 969169689Skan "Dump heap to file before any major stop-the-world GC") \ 970169689Skan \ 971169689Skan manageable(bool, HeapDumpAfterFullGC, false, \ 972169689Skan "Dump heap to file after any major stop-the-world GC") \ 973169689Skan \ 974169689Skan manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 975169689Skan "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 976169689Skan \ 977169689Skan manageable(ccstr, HeapDumpPath, NULL, \ 978169689Skan "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ 979169689Skan "directory) of the dump file (defaults to java_pid<pid>.hprof " \ 980169689Skan "in the working directory)") \ 981169689Skan \ 982169689Skan develop(bool, BreakAtWarning, false, \ 983169689Skan "Execute breakpoint upon encountering VM warning") \ 984169689Skan \ 985169689Skan develop(bool, UseFakeTimers, false, \ 986169689Skan "Tell whether the VM should use system time or a fake timer") \ 987169689Skan \ 988169689Skan product(ccstr, NativeMemoryTracking, "off", \ 989169689Skan "Native memory tracking options") \ 990169689Skan \ 991169689Skan diagnostic(bool, PrintNMTStatistics, false, \ 992169689Skan "Print native memory tracking summary data if it is on") \ 993169689Skan \ 994169689Skan diagnostic(bool, LogCompilation, false, \ 995169689Skan "Log compilation activity in detail to LogFile") \ 996169689Skan \ 997169689Skan product(bool, PrintCompilation, false, \ 
998169689Skan "Print compilations") \ 999169689Skan \ 1000169689Skan diagnostic(bool, TraceNMethodInstalls, false, \ 1001169689Skan "Trace nmethod installation") \ 1002169689Skan \ 1003169689Skan diagnostic(intx, ScavengeRootsInCode, 2, \ 1004169689Skan "0: do not allow scavengable oops in the code cache; " \ 1005169689Skan "1: allow scavenging from the code cache; " \ 1006169689Skan "2: emit as many constants as the compiler can see") \ 1007169689Skan range(0, 2) \ 1008169689Skan \ 1009169689Skan product(bool, AlwaysRestoreFPU, false, \ 1010169689Skan "Restore the FPU control word after every JNI call (expensive)") \ 1011169689Skan \ 1012169689Skan diagnostic(bool, PrintCompilation2, false, \ 1013169689Skan "Print additional statistics per compilation") \ 1014169689Skan \ 1015169689Skan diagnostic(bool, PrintAdapterHandlers, false, \ 1016169689Skan "Print code generated for i2c/c2i adapters") \ 1017169689Skan \ 1018169689Skan diagnostic(bool, VerifyAdapterCalls, trueInDebug, \ 1019169689Skan "Verify that i2c/c2i adapters are called properly") \ 1020169689Skan \ 1021169689Skan develop(bool, VerifyAdapterSharing, false, \ 1022169689Skan "Verify that the code for shared adapters is the equivalent") \ 1023169689Skan \ 1024169689Skan diagnostic(bool, PrintAssembly, false, \ 1025169689Skan "Print assembly code (using external disassembler.so)") \ 1026169689Skan \ 1027169689Skan diagnostic(ccstr, PrintAssemblyOptions, NULL, \ 1028169689Skan "Print options string passed to disassembler.so") \ 1029169689Skan \ 1030169689Skan notproduct(bool, PrintNMethodStatistics, false, \ 1031169689Skan "Print a summary statistic for the generated nmethods") \ 1032169689Skan \ 1033169689Skan diagnostic(bool, PrintNMethods, false, \ 1034169689Skan "Print assembly code for nmethods when generated") \ 1035169689Skan \ 1036169689Skan diagnostic(bool, PrintNativeNMethods, false, \ 1037169689Skan "Print assembly code for native nmethods when generated") \ 1038169689Skan \ 1039169689Skan 
develop(bool, PrintDebugInfo, false, \ 1040169689Skan "Print debug information for all nmethods when generated") \ 1041169689Skan \ 1042169689Skan develop(bool, PrintRelocations, false, \ 1043169689Skan "Print relocation information for all nmethods when generated") \ 1044169689Skan \ 1045169689Skan develop(bool, PrintDependencies, false, \ 1046169689Skan "Print dependency information for all nmethods when generated") \ 1047169689Skan \ 1048169689Skan develop(bool, PrintExceptionHandlers, false, \ 1049169689Skan "Print exception handler tables for all nmethods when generated") \ 1050169689Skan \ 1051169689Skan develop(bool, StressCompiledExceptionHandlers, false, \ 1052169689Skan "Exercise compiled exception handlers") \ 1053169689Skan \ 1054169689Skan develop(bool, InterceptOSException, false, \ 1055169689Skan "Start debugger when an implicit OS (e.g. NULL) " \ 1056169689Skan "exception happens") \ 1057169689Skan \ 1058169689Skan product(bool, PrintCodeCache, false, \ 1059169689Skan "Print the code cache memory usage when exiting") \ 1060169689Skan \ 1061169689Skan develop(bool, PrintCodeCache2, false, \ 1062169689Skan "Print detailed usage information on the code cache when exiting")\ 1063169689Skan \ 1064169689Skan product(bool, PrintCodeCacheOnCompilation, false, \ 1065169689Skan "Print the code cache memory usage each time a method is " \ 1066169689Skan "compiled") \ 1067169689Skan \ 1068169689Skan diagnostic(bool, PrintStubCode, false, \ 1069169689Skan "Print generated stub code") \ 1070169689Skan \ 1071169689Skan product(bool, StackTraceInThrowable, true, \ 1072169689Skan "Collect backtrace in throwable when exception happens") \ 1073169689Skan \ 1074169689Skan product(bool, OmitStackTraceInFastThrow, true, \ 1075169689Skan "Omit backtraces for some 'hot' exceptions in optimized code") \ 1076169689Skan \ 1077169689Skan product(bool, ProfilerPrintByteCodeStatistics, false, \ 1078169689Skan "Print bytecode statistics when dumping profiler output") \ 
1079169689Skan \ 1080169689Skan product(bool, ProfilerRecordPC, false, \ 1081169689Skan "Collect ticks for each 16 byte interval of compiled code") \ 1082169689Skan \ 1083169689Skan product(bool, ProfileVM, false, \ 1084169689Skan "Profile ticks that fall within VM (either in the VM Thread " \ 1085169689Skan "or VM code called through stubs)") \ 1086169689Skan \ 1087169689Skan product(bool, ProfileIntervals, false, \ 1088169689Skan "Print profiles for each interval (see ProfileIntervalsTicks)") \ 1089169689Skan \ 1090169689Skan notproduct(bool, ProfilerCheckIntervals, false, \ 1091169689Skan "Collect and print information on spacing of profiler ticks") \ 1092169689Skan \ 1093169689Skan product(bool, PrintWarnings, true, \ 1094169689Skan "Print JVM warnings to output stream") \ 1095169689Skan \ 1096169689Skan notproduct(uintx, WarnOnStalledSpinLock, 0, \ 1097169689Skan "Print warnings for stalled SpinLocks") \ 1098169689Skan \ 1099169689Skan product(bool, RegisterFinalizersAtInit, true, \ 1100169689Skan "Register finalizable objects at end of Object.<init> or " \ 1101169689Skan "after allocation") \ 1102169689Skan \ 1103169689Skan develop(bool, RegisterReferences, true, \ 1104169689Skan "Tell whether the VM should register soft/weak/final/phantom " \ 1105169689Skan "references") \ 1106169689Skan \ 1107169689Skan develop(bool, IgnoreRewrites, false, \ 1108169689Skan "Suppress rewrites of bytecodes in the oopmap generator. 
" \ 1109169689Skan "This is unsafe!") \ 1110169689Skan \ 1111169689Skan develop(bool, PrintCodeCacheExtension, false, \ 1112169689Skan "Print extension of code cache") \ 1113169689Skan \ 1114169689Skan develop(bool, UsePrivilegedStack, true, \ 1115169689Skan "Enable the security JVM functions") \ 1116169689Skan \ 1117169689Skan develop(bool, ProtectionDomainVerification, true, \ 1118169689Skan "Verify protection domain before resolution in system dictionary")\ 1119169689Skan \ 1120169689Skan product(bool, ClassUnloading, true, \ 1121169689Skan "Do unloading of classes") \ 1122169689Skan \ 1123169689Skan product(bool, ClassUnloadingWithConcurrentMark, true, \ 1124169689Skan "Do unloading of classes with a concurrent marking cycle") \ 1125169689Skan \ 1126169689Skan develop(bool, DisableStartThread, false, \ 1127169689Skan "Disable starting of additional Java threads " \ 1128169689Skan "(for debugging only)") \ 1129169689Skan \ 1130169689Skan develop(bool, MemProfiling, false, \ 1131169689Skan "Write memory usage profiling to log file") \ 1132169689Skan \ 1133169689Skan notproduct(bool, PrintSystemDictionaryAtExit, false, \ 1134169689Skan "Print the system dictionary at exit") \ 1135169689Skan \ 1136169689Skan experimental(intx, PredictedLoadedClassCount, 0, \ 1137169689Skan "Experimental: Tune loaded class cache starting size") \ 1138169689Skan \ 1139169689Skan diagnostic(bool, UnsyncloadClass, false, \ 1140169689Skan "Unstable: VM calls loadClass unsynchronized. 
Custom " \ 1141169689Skan "class loader must call VM synchronized for findClass " \ 1142169689Skan "and defineClass.") \ 1143169689Skan \ 1144169689Skan product(bool, AlwaysLockClassLoader, false, \ 1145169689Skan "Require the VM to acquire the class loader lock before calling " \ 1146169689Skan "loadClass() even for class loaders registering " \ 1147169689Skan "as parallel capable") \ 1148169689Skan \ 1149169689Skan product(bool, AllowParallelDefineClass, false, \ 1150169689Skan "Allow parallel defineClass requests for class loaders " \ 1151169689Skan "registering as parallel capable") \ 1152169689Skan \ 1153169689Skan product(bool, MustCallLoadClassInternal, false, \ 1154169689Skan "Call loadClassInternal() rather than loadClass()") \ 1155169689Skan \ 1156169689Skan product_pd(bool, DontYieldALot, \ 1157169689Skan "Throw away obvious excess yield calls") \ 1158169689Skan \ 1159169689Skan product(bool, ConvertSleepToYield, true, \ 1160169689Skan "Convert sleep(0) to thread yield ") \ 1161169689Skan \ 1162169689Skan product(bool, ConvertYieldToSleep, false, \ 1163169689Skan "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \ 1164169689Skan "behavior") \ 1165169689Skan \ 1166169689Skan develop(bool, UseDetachedThreads, true, \ 1167169689Skan "Use detached threads that are recycled upon termination " \ 1168169689Skan "(for Solaris only)") \ 1169169689Skan \ 1170169689Skan product(bool, UseLWPSynchronization, true, \ 1171169689Skan "Use LWP-based instead of libthread-based synchronization " \ 1172169689Skan "(SPARC only)") \ 1173169689Skan \ 1174169689Skan experimental(ccstr, SyncKnobs, NULL, \ 1175169689Skan "(Unstable) Various monitor synchronization tunables") \ 1176169689Skan \ 1177169689Skan experimental(intx, EmitSync, 0, \ 1178169689Skan "(Unsafe, Unstable) " \ 1179169689Skan "Control emission of inline sync fast-path code") \ 1180169689Skan \ 1181169689Skan product(intx, MonitorBound, 0, "Bound Monitor population") \ 1182169689Skan range(0, 
max_jint) \ 1183169689Skan \ 1184169689Skan product(bool, MonitorInUseLists, true, "Track Monitors for Deflation") \ 1185169689Skan \ 1186169689Skan experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) " \ 1187169689Skan "Experimental Sync flags") \ 1188169689Skan \ 1189169689Skan experimental(intx, SyncVerbose, 0, "(Unstable)") \ 1190169689Skan \ 1191169689Skan diagnostic(bool, InlineNotify, true, "intrinsify subset of notify") \ 1192169689Skan \ 1193169689Skan experimental(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \ 1194169689Skan \ 1195169689Skan experimental(intx, hashCode, 5, \ 1196169689Skan "(Unstable) select hashCode generation algorithm") \ 1197169689Skan \ 1198169689Skan product(bool, FilterSpuriousWakeups, true, \ 1199169689Skan "When true prevents OS-level spurious, or premature, wakeups " \ 1200169689Skan "from Object.wait (Ignored for Windows)") \ 1201169689Skan \ 1202169689Skan experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \ 1203169689Skan \ 1204169689Skan experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \ 1205169689Skan \ 1206169689Skan experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ 1207169689Skan \ 1208169689Skan develop(bool, UsePthreads, false, \ 1209169689Skan "Use pthread-based instead of libthread-based synchronization " \ 1210169689Skan "(SPARC only)") \ 1211169689Skan \ 1212169689Skan product(bool, ReduceSignalUsage, false, \ 1213169689Skan "Reduce the use of OS signals in Java and/or the VM") \ 1214169689Skan \ 1215169689Skan develop_pd(bool, ShareVtableStubs, \ 1216169689Skan "Share vtable stubs (smaller code but worse branch prediction") \ 1217169689Skan \ 1218169689Skan develop(bool, LoadLineNumberTables, true, \ 1219169689Skan "Tell whether the class file parser loads line number tables") \ 1220169689Skan \ 1221169689Skan develop(bool, LoadLocalVariableTables, true, \ 1222169689Skan "Tell whether the class file parser loads local variable tables") \ 1223169689Skan \ 1224169689Skan develop(bool, 
LoadLocalVariableTypeTables, true, \ 1225169689Skan "Tell whether the class file parser loads local variable type" \ 1226169689Skan "tables") \ 1227169689Skan \ 1228169689Skan product(bool, AllowUserSignalHandlers, false, \ 1229169689Skan "Do not complain if the application installs signal handlers " \ 1230169689Skan "(Solaris & Linux only)") \ 1231169689Skan \ 1232169689Skan product(bool, UseSignalChaining, true, \ 1233169689Skan "Use signal-chaining to invoke signal handlers installed " \ 1234169689Skan "by the application (Solaris & Linux only)") \ 1235169689Skan \ 1236169689Skan product(bool, AllowJNIEnvProxy, false, \ 1237169689Skan "Allow JNIEnv proxies for jdbx") \ 1238169689Skan \ 1239169689Skan product(bool, RestoreMXCSROnJNICalls, false, \ 1240169689Skan "Restore MXCSR when returning from JNI calls") \ 1241169689Skan \ 1242169689Skan product(bool, CheckJNICalls, false, \ 1243169689Skan "Verify all arguments to JNI calls") \ 1244169689Skan \ 1245169689Skan product(bool, CheckEndorsedAndExtDirs, false, \ 1246169689Skan "Verify the endorsed and extension directories are not used") \ 1247169689Skan \ 1248169689Skan product(bool, UseFastJNIAccessors, true, \ 1249169689Skan "Use optimized versions of Get<Primitive>Field") \ 1250169689Skan \ 1251169689Skan product(intx, MaxJNILocalCapacity, 65536, \ 1252169689Skan "Maximum allowable local JNI handle capacity to " \ 1253169689Skan "EnsureLocalCapacity() and PushLocalFrame(), " \ 1254169689Skan "where <= 0 is unlimited, default: 65536") \ 1255169689Skan range(min_intx, max_intx) \ 1256169689Skan \ 1257169689Skan product(bool, EagerXrunInit, false, \ 1258169689Skan "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 1259169689Skan "but not all -Xrun libraries may support the state of the VM " \ 1260169689Skan "at this time") \ 1261169689Skan \ 1262169689Skan product(bool, PreserveAllAnnotations, false, \ 1263169689Skan "Preserve RuntimeInvisibleAnnotations as well " \ 1264169689Skan "as 
RuntimeVisibleAnnotations") \ 1265169689Skan \ 1266169689Skan develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 1267169689Skan "Number of OutOfMemoryErrors preallocated with backtrace") \ 1268169689Skan \ 1269169689Skan product(bool, UseXMMForArrayCopy, false, \ 1270169689Skan "Use SSE2 MOVQ instruction for Arraycopy") \ 1271169689Skan \ 1272169689Skan product(intx, FieldsAllocationStyle, 1, \ 1273169689Skan "0 - type based with oops first, " \ 1274169689Skan "1 - with oops last, " \ 1275169689Skan "2 - oops in super and sub classes are together") \ 1276169689Skan range(0, 2) \ 1277169689Skan \ 1278169689Skan product(bool, CompactFields, true, \ 1279169689Skan "Allocate nonstatic fields in gaps between previous fields") \ 1280169689Skan \ 1281169689Skan notproduct(bool, PrintFieldLayout, false, \ 1282169689Skan "Print field layout for each class") \ 1283169689Skan \ 1284169689Skan /* Need to limit the extent of the padding to reasonable size. */\ 1285169689Skan /* 8K is well beyond the reasonable HW cache line size, even with */\ 1286169689Skan /* aggressive prefetching, while still leaving the room for segregating */\ 1287169689Skan /* among the distinct pages. 
*/\ 1288169689Skan product(intx, ContendedPaddingWidth, 128, \ 1289169689Skan "How many bytes to pad the fields/classes marked @Contended with")\ 1290169689Skan range(0, 8192) \ 1291169689Skan constraint(ContendedPaddingWidthConstraintFunc,AfterErgo) \ 1292169689Skan \ 1293169689Skan product(bool, EnableContended, true, \ 1294169689Skan "Enable @Contended annotation support") \ 1295169689Skan \ 1296169689Skan product(bool, RestrictContended, true, \ 1297169689Skan "Restrict @Contended to trusted classes") \ 1298169689Skan \ 1299169689Skan product(bool, UseBiasedLocking, true, \ 1300169689Skan "Enable biased locking in JVM") \ 1301169689Skan \ 1302169689Skan product(intx, BiasedLockingStartupDelay, 4000, \ 1303169689Skan "Number of milliseconds to wait before enabling biased locking") \ 1304169689Skan range(0, (intx)(max_jint-(max_jint%PeriodicTask::interval_gran))) \ 1305169689Skan constraint(BiasedLockingStartupDelayFunc,AfterErgo) \ 1306169689Skan \ 1307169689Skan diagnostic(bool, PrintBiasedLockingStatistics, false, \ 1308169689Skan "Print statistics of biased locking in JVM") \ 1309169689Skan \ 1310169689Skan product(intx, BiasedLockingBulkRebiasThreshold, 20, \ 1311169689Skan "Threshold of number of revocations per type to try to " \ 1312169689Skan "rebias all objects in the heap of that type") \ 1313169689Skan range(0, max_intx) \ 1314169689Skan constraint(BiasedLockingBulkRebiasThresholdFunc,AfterErgo) \ 1315169689Skan \ 1316169689Skan product(intx, BiasedLockingBulkRevokeThreshold, 40, \ 1317169689Skan "Threshold of number of revocations per type to permanently " \ 1318169689Skan "revoke biases of all objects in the heap of that type") \ 1319169689Skan range(0, max_intx) \ 1320169689Skan constraint(BiasedLockingBulkRevokeThresholdFunc,AfterErgo) \ 1321169689Skan \ 1322169689Skan product(intx, BiasedLockingDecayTime, 25000, \ 1323169689Skan "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ 1324169689Skan "type after previous bulk rebias") \ 
1325169689Skan range(500, max_intx) \ 1326169689Skan constraint(BiasedLockingDecayTimeFunc,AfterErgo) \ 1327169689Skan \ 1328169689Skan product(bool, ExitOnOutOfMemoryError, false, \ 1329169689Skan "JVM exits on the first occurrence of an out-of-memory error") \ 1330169689Skan \ 1331169689Skan product(bool, CrashOnOutOfMemoryError, false, \ 1332169689Skan "JVM aborts, producing an error log and core/mini dump, on the " \ 1333169689Skan "first occurrence of an out-of-memory error") \ 1334169689Skan \ 1335169689Skan /* tracing */ \ 1336169689Skan \ 1337169689Skan develop(bool, StressRewriter, false, \ 1338169689Skan "Stress linktime bytecode rewriting") \ 1339169689Skan \ 1340169689Skan product(ccstr, TraceJVMTI, NULL, \ 1341169689Skan "Trace flags for JVMTI functions and events") \ 1342169689Skan \ 1343169689Skan /* This option can change an EMCP method into an obsolete method. */ \ 1344169689Skan /* This can affect tests that except specific methods to be EMCP. */ \ 1345169689Skan /* This option should be used with caution. 
*/ \ 1346169689Skan product(bool, StressLdcRewrite, false, \ 1347169689Skan "Force ldc -> ldc_w rewrite during RedefineClasses") \ 1348169689Skan \ 1349169689Skan /* change to false by default sometime after Mustang */ \ 1350169689Skan product(bool, VerifyMergedCPBytecodes, true, \ 1351169689Skan "Verify bytecodes after RedefineClasses constant pool merging") \ 1352169689Skan \ 1353169689Skan develop(bool, TraceBytecodes, false, \ 1354169689Skan "Trace bytecode execution") \ 1355169689Skan \ 1356169689Skan develop(bool, TraceICs, false, \ 1357169689Skan "Trace inline cache changes") \ 1358169689Skan \ 1359169689Skan notproduct(bool, TraceInvocationCounterOverflow, false, \ 1360169689Skan "Trace method invocation counter overflow") \ 1361169689Skan \ 1362169689Skan develop(bool, TraceInlineCacheClearing, false, \ 1363169689Skan "Trace clearing of inline caches in nmethods") \ 1364169689Skan \ 1365169689Skan develop(bool, TraceDependencies, false, \ 1366169689Skan "Trace dependencies") \ 1367169689Skan \ 1368169689Skan develop(bool, VerifyDependencies, trueInDebug, \ 1369169689Skan "Exercise and verify the compilation dependency mechanism") \ 1370169689Skan \ 1371169689Skan develop(bool, TraceNewOopMapGeneration, false, \ 1372169689Skan "Trace OopMapGeneration") \ 1373169689Skan \ 1374169689Skan develop(bool, TraceNewOopMapGenerationDetailed, false, \ 1375169689Skan "Trace OopMapGeneration: print detailed cell states") \ 1376169689Skan \ 1377169689Skan develop(bool, TimeOopMap, false, \ 1378169689Skan "Time calls to GenerateOopMap::compute_map() in sum") \ 1379169689Skan \ 1380169689Skan develop(bool, TimeOopMap2, false, \ 1381169689Skan "Time calls to GenerateOopMap::compute_map() individually") \ 1382169689Skan \ 1383169689Skan develop(bool, TraceOopMapRewrites, false, \ 1384169689Skan "Trace rewriting of method oops during oop map generation") \ 1385169689Skan \ 1386169689Skan develop(bool, TraceICBuffer, false, \ 1387169689Skan "Trace usage of IC buffer") \ 
1388169689Skan \ 1389169689Skan develop(bool, TraceCompiledIC, false, \ 1390169689Skan "Trace changes of compiled IC") \ 1391169689Skan \ 1392169689Skan /* gc */ \ 1393169689Skan \ 1394169689Skan product(bool, UseSerialGC, false, \ 1395169689Skan "Use the Serial garbage collector") \ 1396169689Skan \ 1397169689Skan product(bool, UseG1GC, false, \ 1398169689Skan "Use the Garbage-First garbage collector") \ 1399169689Skan \ 1400169689Skan product(bool, UseParallelGC, false, \ 1401169689Skan "Use the Parallel Scavenge garbage collector") \ 1402169689Skan \ 1403169689Skan product(bool, UseParallelOldGC, false, \ 1404169689Skan "Use the Parallel Old garbage collector") \ 1405169689Skan \ 1406169689Skan product(uintx, HeapMaximumCompactionInterval, 20, \ 1407169689Skan "How often should we maximally compact the heap (not allowing " \ 1408169689Skan "any dead space)") \ 1409169689Skan range(0, max_uintx) \ 1410169689Skan \ 1411169689Skan product(uintx, HeapFirstMaximumCompactionCount, 3, \ 1412169689Skan "The collection count for the first maximum compaction") \ 1413169689Skan range(0, max_uintx) \ 1414169689Skan \ 1415169689Skan product(bool, UseMaximumCompactionOnSystemGC, true, \ 1416169689Skan "Use maximum compaction in the Parallel Old garbage collector " \ 1417169689Skan "for a system GC") \ 1418169689Skan \ 1419169689Skan product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 1420169689Skan "The mean used by the parallel compact dead wood " \ 1421169689Skan "limiter (a number between 0-100)") \ 1422169689Skan range(0, 100) \ 1423169689Skan \ 1424169689Skan product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 1425169689Skan "The standard deviation used by the parallel compact dead wood " \ 1426169689Skan "limiter (a number between 0-100)") \ 1427169689Skan range(0, 100) \ 1428169689Skan \ 1429169689Skan product(uint, ParallelGCThreads, 0, \ 1430169689Skan "Number of parallel threads parallel gc will use") \ 1431169689Skan 
constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \ 1432169689Skan \ 1433169689Skan diagnostic(bool, UseSemaphoreGCThreadsSynchronization, true, \ 1434169689Skan "Use semaphore synchronization for the GC Threads, " \ 1435169689Skan "instead of synchronization based on mutexes") \ 1436169689Skan \ 1437169689Skan product(bool, UseDynamicNumberOfGCThreads, false, \ 1438169689Skan "Dynamically choose the number of parallel threads " \ 1439169689Skan "parallel gc will use") \ 1440169689Skan \ 1441169689Skan diagnostic(bool, InjectGCWorkerCreationFailure, false, \ 1442169689Skan "Inject thread creation failures for " \ 1443169689Skan "UseDynamicNumberOfGCThreads") \ 1444169689Skan \ 1445169689Skan diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ 1446169689Skan "Force dynamic selection of the number of " \ 1447169689Skan "parallel threads parallel gc will use to aid debugging") \ 1448169689Skan \ 1449169689Skan product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \ 1450169689Skan "Size of heap (bytes) per GC thread used in calculating the " \ 1451169689Skan "number of GC threads") \ 1452169689Skan range((size_t)os::vm_page_size(), (size_t)max_uintx) \ 1453169689Skan \ 1454169689Skan product(uint, ConcGCThreads, 0, \ 1455169689Skan "Number of threads concurrent gc will use") \ 1456169689Skan constraint(ConcGCThreadsConstraintFunc,AfterErgo) \ 1457169689Skan \ 1458169689Skan product(uintx, GCTaskTimeStampEntries, 200, \ 1459169689Skan "Number of time stamp entries per gc worker thread") \ 1460169689Skan range(1, max_uintx) \ 1461169689Skan \ 1462169689Skan product(bool, AlwaysTenure, false, \ 1463169689Skan "Always tenure objects in eden (ParallelGC only)") \ 1464169689Skan \ 1465169689Skan product(bool, NeverTenure, false, \ 1466169689Skan "Never tenure objects in eden, may tenure on overflow " \ 1467169689Skan "(ParallelGC only)") \ 1468169689Skan \ 1469169689Skan product(bool, ScavengeBeforeFullGC, true, \ 1470169689Skan "Scavenge youngest generation 
before each full GC.") \ 1471169689Skan \ 1472169689Skan product(bool, UseConcMarkSweepGC, false, \ 1473169689Skan "Use Concurrent Mark-Sweep GC in the old generation") \ 1474169689Skan \ 1475169689Skan product(bool, ExplicitGCInvokesConcurrent, false, \ 1476169689Skan "A System.gc() request invokes a concurrent collection; " \ 1477169689Skan "(effective only when using concurrent collectors)") \ 1478169689Skan \ 1479169689Skan product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ 1480169689Skan "A System.gc() request invokes a concurrent collection and " \ 1481169689Skan "also unloads classes during such a concurrent gc cycle " \ 1482169689Skan "(effective only when UseConcMarkSweepGC)") \ 1483169689Skan \ 1484169689Skan product(bool, GCLockerInvokesConcurrent, false, \ 1485169689Skan "The exit of a JNI critical section necessitating a scavenge, " \ 1486169689Skan "also kicks off a background concurrent collection") \ 1487169689Skan \ 1488169689Skan product(uintx, GCLockerEdenExpansionPercent, 5, \ 1489169689Skan "How much the GC can expand the eden by while the GC locker " \ 1490169689Skan "is active (as a percentage)") \ 1491169689Skan range(0, 100) \ 1492169689Skan \ 1493169689Skan diagnostic(uintx, GCLockerRetryAllocationCount, 2, \ 1494169689Skan "Number of times to retry allocations when " \ 1495169689Skan "blocked by the GC locker") \ 1496169689Skan range(0, max_uintx) \ 1497169689Skan \ 1498169689Skan product(bool, UseCMSBestFit, true, \ 1499169689Skan "Use CMS best fit allocation strategy") \ 1500169689Skan \ 1501169689Skan product(bool, UseParNewGC, false, \ 1502169689Skan "Use parallel threads in the new generation") \ 1503169689Skan \ 1504169689Skan product(uintx, ParallelGCBufferWastePct, 10, \ 1505169689Skan "Wasted fraction of parallel allocation buffer") \ 1506169689Skan range(0, 100) \ 1507169689Skan \ 1508169689Skan product(uintx, TargetPLABWastePct, 10, \ 1509169689Skan "Target wasted space in last buffer as percent of overall " \ 
1510169689Skan "allocation") \ 1511169689Skan range(1, 100) \ 1512169689Skan \ 1513169689Skan product(uintx, PLABWeight, 75, \ 1514169689Skan "Percentage (0-100) used to weight the current sample when " \ 1515169689Skan "computing exponentially decaying average for ResizePLAB") \ 1516169689Skan range(0, 100) \ 1517169689Skan \ 1518169689Skan product(bool, ResizePLAB, true, \ 1519169689Skan "Dynamically resize (survivor space) promotion LAB's") \ 1520169689Skan \ 1521169689Skan product(int, ParGCArrayScanChunk, 50, \ 1522169689Skan "Scan a subset of object array and push remainder, if array is " \ 1523169689Skan "bigger than this") \ 1524169689Skan range(1, max_jint/3) \ 1525169689Skan \ 1526169689Skan product(bool, ParGCUseLocalOverflow, false, \ 1527169689Skan "Instead of a global overflow list, use local overflow stacks") \ 1528169689Skan \ 1529169689Skan product(bool, ParGCTrimOverflow, true, \ 1530169689Skan "Eagerly trim the local overflow lists " \ 1531169689Skan "(when ParGCUseLocalOverflow)") \ 1532169689Skan \ 1533169689Skan notproduct(bool, ParGCWorkQueueOverflowALot, false, \ 1534169689Skan "Simulate work queue overflow in ParNew") \ 1535169689Skan \ 1536169689Skan notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ 1537169689Skan "An `interval' counter that determines how frequently " \ 1538169689Skan "we simulate overflow; a smaller number increases frequency") \ 1539169689Skan \ 1540169689Skan product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ 1541169689Skan "The desired number of objects to claim from the overflow list") \ 1542169689Skan range(0, max_uintx) \ 1543169689Skan \ 1544169689Skan diagnostic(uintx, ParGCStridesPerThread, 2, \ 1545169689Skan "The number of strides per worker thread that we divide up the " \ 1546169689Skan "card table scanning work into") \ 1547169689Skan range(1, max_uintx) \ 1548169689Skan constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo) \ 1549169689Skan \ 1550169689Skan diagnostic(intx, 
ParGCCardsPerStrideChunk, 256, \ 1551169689Skan "The number of cards in each chunk of the parallel chunks used " \ 1552169689Skan "during card table scanning") \ 1553169689Skan range(1, max_intx) \ 1554169689Skan constraint(ParGCCardsPerStrideChunkConstraintFunc,AfterMemoryInit)\ 1555169689Skan \ 1556169689Skan product(uintx, OldPLABWeight, 50, \ 1557169689Skan "Percentage (0-100) used to weight the current sample when " \ 1558169689Skan "computing exponentially decaying average for resizing " \ 1559169689Skan "OldPLABSize") \ 1560169689Skan range(0, 100) \ 1561169689Skan \ 1562169689Skan product(bool, ResizeOldPLAB, true, \ 1563169689Skan "Dynamically resize (old gen) promotion LAB's") \ 1564169689Skan \ 1565169689Skan product(size_t, CMSOldPLABMax, 1024, \ 1566169689Skan "Maximum size of CMS gen promotion LAB caches per worker " \ 1567169689Skan "per block size") \ 1568169689Skan range(1, max_uintx) \ 1569169689Skan constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit) \ 1570169689Skan \ 1571169689Skan product(size_t, CMSOldPLABMin, 16, \ 1572169689Skan "Minimum size of CMS gen promotion LAB caches per worker " \ 1573169689Skan "per block size") \ 1574169689Skan range(1, max_uintx) \ 1575169689Skan constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit) \ 1576169689Skan \ 1577169689Skan product(uintx, CMSOldPLABNumRefills, 4, \ 1578169689Skan "Nominal number of refills of CMS gen promotion LAB cache " \ 1579169689Skan "per worker per block size") \ 1580169689Skan range(1, max_uintx) \ 1581169689Skan \ 1582169689Skan product(bool, CMSOldPLABResizeQuicker, false, \ 1583169689Skan "React on-the-fly during a scavenge to a sudden " \ 1584169689Skan "change in block demand rate") \ 1585169689Skan \ 1586169689Skan product(uintx, CMSOldPLABToleranceFactor, 4, \ 1587169689Skan "The tolerance of the phase-change detector for on-the-fly " \ 1588169689Skan "PLAB resizing during a scavenge") \ 1589169689Skan range(1, max_uintx) \ 1590169689Skan \ 1591169689Skan 
product(uintx, CMSOldPLABReactivityFactor, 2, \ 1592169689Skan "The gain in the feedback loop for on-the-fly PLAB resizing " \ 1593169689Skan "during a scavenge") \ 1594169689Skan range(1, max_uintx) \ 1595169689Skan \ 1596169689Skan product(bool, AlwaysPreTouch, false, \ 1597169689Skan "Force all freshly committed pages to be pre-touched") \ 1598169689Skan \ 1599169689Skan product(size_t, PreTouchParallelChunkSize, 1 * G, \ 1600169689Skan "Per-thread chunk size for parallel memory pre-touch.") \ 1601169689Skan range(1, SIZE_MAX / 2) \ 1602169689Skan \ 1603169689Skan product_pd(size_t, CMSYoungGenPerWorker, \ 1604169689Skan "The maximum size of young gen chosen by default per GC worker " \ 1605169689Skan "thread available") \ 1606169689Skan range(1, max_uintx) \ 1607169689Skan \ 1608169689Skan product(uintx, CMSIncrementalSafetyFactor, 10, \ 1609169689Skan "Percentage (0-100) used to add conservatism when computing the " \ 1610169689Skan "duty cycle") \ 1611169689Skan range(0, 100) \ 1612169689Skan \ 1613169689Skan product(uintx, CMSExpAvgFactor, 50, \ 1614169689Skan "Percentage (0-100) used to weight the current sample when " \ 1615169689Skan "computing exponential averages for CMS statistics") \ 1616169689Skan range(0, 100) \ 1617169689Skan \ 1618169689Skan product(uintx, CMS_FLSWeight, 75, \ 1619169689Skan "Percentage (0-100) used to weight the current sample when " \ 1620169689Skan "computing exponentially decaying averages for CMS FLS " \ 1621169689Skan "statistics") \ 1622169689Skan range(0, 100) \ 1623169689Skan \ 1624169689Skan product(uintx, CMS_FLSPadding, 1, \ 1625169689Skan "The multiple of deviation from mean to use for buffering " \ 1626169689Skan "against volatility in free list demand") \ 1627169689Skan range(0, max_juint) \ 1628169689Skan \ 1629169689Skan product(uintx, FLSCoalescePolicy, 2, \ 1630169689Skan "CMS: aggressiveness level for coalescing, increasing " \ 1631169689Skan "from 0 to 4") \ 1632169689Skan range(0, 4) \ 1633169689Skan \ 
1634169689Skan product(bool, FLSAlwaysCoalesceLarge, false, \ 1635169689Skan "CMS: larger free blocks are always available for coalescing") \ 1636169689Skan \ 1637169689Skan product(double, FLSLargestBlockCoalesceProximity, 0.99, \ 1638169689Skan "CMS: the smaller the percentage the greater the coalescing " \ 1639169689Skan "force") \ 1640169689Skan range(0.0, 1.0) \ 1641169689Skan \ 1642169689Skan product(double, CMSSmallCoalSurplusPercent, 1.05, \ 1643169689Skan "CMS: the factor by which to inflate estimated demand of small " \ 1644169689Skan "block sizes to prevent coalescing with an adjoining block") \ 1645169689Skan range(0.0, DBL_MAX) \ 1646169689Skan \ 1647169689Skan product(double, CMSLargeCoalSurplusPercent, 0.95, \ 1648169689Skan "CMS: the factor by which to inflate estimated demand of large " \ 1649169689Skan "block sizes to prevent coalescing with an adjoining block") \ 1650169689Skan range(0.0, DBL_MAX) \ 1651169689Skan \ 1652169689Skan product(double, CMSSmallSplitSurplusPercent, 1.10, \ 1653169689Skan "CMS: the factor by which to inflate estimated demand of small " \ 1654169689Skan "block sizes to prevent splitting to supply demand for smaller " \ 1655169689Skan "blocks") \ 1656169689Skan range(0.0, DBL_MAX) \ 1657169689Skan \ 1658169689Skan product(double, CMSLargeSplitSurplusPercent, 1.00, \ 1659169689Skan "CMS: the factor by which to inflate estimated demand of large " \ 1660169689Skan "block sizes to prevent splitting to supply demand for smaller " \ 1661169689Skan "blocks") \ 1662169689Skan range(0.0, DBL_MAX) \ 1663169689Skan \ 1664169689Skan product(bool, CMSExtrapolateSweep, false, \ 1665169689Skan "CMS: cushion for block demand during sweep") \ 1666169689Skan \ 1667169689Skan product(uintx, CMS_SweepWeight, 75, \ 1668169689Skan "Percentage (0-100) used to weight the current sample when " \ 1669169689Skan "computing exponentially decaying average for inter-sweep " \ 1670169689Skan "duration") \ 1671169689Skan range(0, 100) \ 1672169689Skan \ 
1673169689Skan product(uintx, CMS_SweepPadding, 1, \ 1674169689Skan "The multiple of deviation from mean to use for buffering " \ 1675169689Skan "against volatility in inter-sweep duration") \ 1676169689Skan range(0, max_juint) \ 1677169689Skan \ 1678169689Skan product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 1679169689Skan "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 1680169689Skan "duration exceeds this threshold in milliseconds") \ 1681169689Skan range(0, max_uintx) \ 1682169689Skan \ 1683169689Skan product(bool, CMSClassUnloadingEnabled, true, \ 1684169689Skan "Whether class unloading enabled when using CMS GC") \ 1685169689Skan \ 1686169689Skan product(uintx, CMSClassUnloadingMaxInterval, 0, \ 1687169689Skan "When CMS class unloading is enabled, the maximum CMS cycle " \ 1688169689Skan "count for which classes may not be unloaded") \ 1689169689Skan range(0, max_uintx) \ 1690169689Skan \ 1691169689Skan product(uintx, CMSIndexedFreeListReplenish, 4, \ 1692169689Skan "Replenish an indexed free list with this number of chunks") \ 1693169689Skan range(1, max_uintx) \ 1694169689Skan \ 1695169689Skan product(bool, CMSReplenishIntermediate, true, \ 1696169689Skan "Replenish all intermediate free-list caches") \ 1697169689Skan \ 1698169689Skan product(bool, CMSSplitIndexedFreeListBlocks, true, \ 1699169689Skan "When satisfying batched demand, split blocks from the " \ 1700169689Skan "IndexedFreeList whose size is a multiple of requested size") \ 1701169689Skan \ 1702169689Skan product(bool, CMSLoopWarn, false, \ 1703169689Skan "Warn in case of excessive CMS looping") \ 1704169689Skan \ 1705169689Skan /* where does the range max value of (max_jint - 1) come from? 
*/ \ 1706169689Skan product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ 1707169689Skan "Maximum size of marking stack") \ 1708169689Skan range(1, (max_jint - 1)) \ 1709169689Skan \ 1710169689Skan product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ 1711169689Skan "Size of marking stack") \ 1712169689Skan constraint(MarkStackSizeConstraintFunc,AfterErgo) \ 1713169689Skan \ 1714169689Skan notproduct(bool, CMSMarkStackOverflowALot, false, \ 1715169689Skan "Simulate frequent marking stack / work queue overflow") \ 1716169689Skan \ 1717169689Skan notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ 1718169689Skan "An \"interval\" counter that determines how frequently " \ 1719169689Skan "to simulate overflow; a smaller number increases frequency") \ 1720169689Skan \ 1721169689Skan product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 1722169689Skan "Maximum number of abortable preclean iterations, if > 0") \ 1723169689Skan range(0, max_uintx) \ 1724169689Skan \ 1725169689Skan product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 1726169689Skan "Maximum time in abortable preclean (in milliseconds)") \ 1727169689Skan range(0, max_intx) \ 1728169689Skan \ 1729169689Skan product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ 1730169689Skan "Nominal minimum work per abortable preclean iteration") \ 1731169689Skan range(0, max_uintx) \ 1732169689Skan \ 1733 manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ 1734 "Time that we sleep between iterations when not given " \ 1735 "enough work per iteration") \ 1736 range(0, max_intx) \ 1737 \ 1738 /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \ 1739 product(size_t, CMSRescanMultiple, 32, \ 1740 "Size (in cards) of CMS parallel rescan task") \ 1741 range(1, SIZE_MAX / 4096) \ 1742 constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit) \ 1743 \ 1744 /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \ 1745 product(size_t, CMSConcMarkMultiple, 32, \ 1746 "Size 
(in cards) of CMS concurrent MT marking task") \ 1747 range(1, SIZE_MAX / 4096) \ 1748 constraint(CMSConcMarkMultipleConstraintFunc,AfterMemoryInit) \ 1749 \ 1750 product(bool, CMSAbortSemantics, false, \ 1751 "Whether abort-on-overflow semantics is implemented") \ 1752 \ 1753 product(bool, CMSParallelInitialMarkEnabled, true, \ 1754 "Use the parallel initial mark.") \ 1755 \ 1756 product(bool, CMSParallelRemarkEnabled, true, \ 1757 "Whether parallel remark enabled (only if ParNewGC)") \ 1758 \ 1759 product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 1760 "Whether parallel remark of survivor space " \ 1761 "enabled (effective only if CMSParallelRemarkEnabled)") \ 1762 \ 1763 product(bool, CMSPLABRecordAlways, true, \ 1764 "Always record survivor space PLAB boundaries (effective only " \ 1765 "if CMSParallelSurvivorRemarkEnabled)") \ 1766 \ 1767 product(bool, CMSEdenChunksRecordAlways, true, \ 1768 "Always record eden chunks used for the parallel initial mark " \ 1769 "or remark of eden") \ 1770 \ 1771 product(bool, CMSConcurrentMTEnabled, true, \ 1772 "Whether multi-threaded concurrent work enabled " \ 1773 "(effective only if ParNewGC)") \ 1774 \ 1775 product(bool, CMSPrecleaningEnabled, true, \ 1776 "Whether concurrent precleaning enabled") \ 1777 \ 1778 product(uintx, CMSPrecleanIter, 3, \ 1779 "Maximum number of precleaning iteration passes") \ 1780 range(0, 9) \ 1781 \ 1782 product(uintx, CMSPrecleanDenominator, 3, \ 1783 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 1784 "ratio") \ 1785 range(1, max_uintx) \ 1786 constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo) \ 1787 \ 1788 product(uintx, CMSPrecleanNumerator, 2, \ 1789 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 1790 "ratio") \ 1791 range(0, max_uintx-1) \ 1792 constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo) \ 1793 \ 1794 product(bool, CMSPrecleanRefLists1, true, \ 1795 "Preclean ref lists during (initial) preclean phase") \ 1796 \ 1797 
product(bool, CMSPrecleanRefLists2, false, \ 1798 "Preclean ref lists during abortable preclean phase") \ 1799 \ 1800 product(bool, CMSPrecleanSurvivors1, false, \ 1801 "Preclean survivors during (initial) preclean phase") \ 1802 \ 1803 product(bool, CMSPrecleanSurvivors2, true, \ 1804 "Preclean survivors during abortable preclean phase") \ 1805 \ 1806 product(uintx, CMSPrecleanThreshold, 1000, \ 1807 "Do not iterate again if number of dirty cards is less than this")\ 1808 range(100, max_uintx) \ 1809 \ 1810 product(bool, CMSCleanOnEnter, true, \ 1811 "Clean-on-enter optimization for reducing number of dirty cards") \ 1812 \ 1813 product(uintx, CMSRemarkVerifyVariant, 1, \ 1814 "Choose variant (1,2) of verification following remark") \ 1815 range(1, 2) \ 1816 \ 1817 product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 1818 "If Eden size is below this, do not try to schedule remark") \ 1819 range(0, max_uintx) \ 1820 \ 1821 product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 1822 "The Eden occupancy percentage (0-100) at which " \ 1823 "to try and schedule remark pause") \ 1824 range(0, 100) \ 1825 \ 1826 product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 1827 "Start sampling eden top at least before young gen " \ 1828 "occupancy reaches 1/<ratio> of the size at which " \ 1829 "we plan to schedule remark") \ 1830 range(1, max_uintx) \ 1831 \ 1832 product(uintx, CMSSamplingGrain, 16*K, \ 1833 "The minimum distance between eden samples for CMS (see above)") \ 1834 range(ObjectAlignmentInBytes, max_uintx) \ 1835 constraint(CMSSamplingGrainConstraintFunc,AfterMemoryInit) \ 1836 \ 1837 product(bool, CMSScavengeBeforeRemark, false, \ 1838 "Attempt scavenge before the CMS remark step") \ 1839 \ 1840 product(uintx, CMSWorkQueueDrainThreshold, 10, \ 1841 "Don't drain below this size per parallel worker/thief") \ 1842 range(1, max_juint) \ 1843 constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo) \ 1844 \ 1845 manageable(intx, CMSWaitDuration, 2000, \ 1846 
"Time in milliseconds that CMS thread waits for young GC") \ 1847 range(min_jint, max_jint) \ 1848 \ 1849 develop(uintx, CMSCheckInterval, 1000, \ 1850 "Interval in milliseconds that CMS thread checks if it " \ 1851 "should start a collection cycle") \ 1852 \ 1853 product(bool, CMSYield, true, \ 1854 "Yield between steps of CMS") \ 1855 \ 1856 product(size_t, CMSBitMapYieldQuantum, 10*M, \ 1857 "Bitmap operations should process at most this many bits " \ 1858 "between yields") \ 1859 range(1, max_uintx) \ 1860 constraint(CMSBitMapYieldQuantumConstraintFunc,AfterMemoryInit) \ 1861 \ 1862 product(bool, CMSPrintChunksInDump, false, \ 1863 "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ 1864 "trace level include more detailed information about the" \ 1865 "free chunks") \ 1866 \ 1867 product(bool, CMSPrintObjectsInDump, false, \ 1868 "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ 1869 "trace level include more detailed information about the" \ 1870 "allocated objects") \ 1871 \ 1872 diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 1873 "Verify that all references across the FLS boundary " \ 1874 "are to valid objects") \ 1875 \ 1876 diagnostic(bool, FLSVerifyLists, false, \ 1877 "Do lots of (expensive) FreeListSpace verification") \ 1878 \ 1879 diagnostic(bool, FLSVerifyIndexTable, false, \ 1880 "Do lots of (expensive) FLS index table verification") \ 1881 \ 1882 develop(bool, FLSVerifyDictionary, false, \ 1883 "Do lots of (expensive) FLS dictionary verification") \ 1884 \ 1885 develop(bool, VerifyBlockOffsetArray, false, \ 1886 "Do (expensive) block offset array verification") \ 1887 \ 1888 diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ 1889 "Maintain _unallocated_block in BlockOffsetArray " \ 1890 "(currently applicable only to CMS collector)") \ 1891 \ 1892 product(intx, RefDiscoveryPolicy, 0, \ 1893 "Select type of reference discovery policy: " \ 1894 "reference-based(0) or referent-based(1)") \ 1895 
range(ReferenceProcessor::DiscoveryPolicyMin, \ 1896 ReferenceProcessor::DiscoveryPolicyMax) \ 1897 \ 1898 product(bool, ParallelRefProcEnabled, false, \ 1899 "Enable parallel reference processing whenever possible") \ 1900 \ 1901 product(bool, ParallelRefProcBalancingEnabled, true, \ 1902 "Enable balancing of reference processing queues") \ 1903 \ 1904 product(uintx, CMSTriggerRatio, 80, \ 1905 "Percentage of MinHeapFreeRatio in CMS generation that is " \ 1906 "allocated before a CMS collection cycle commences") \ 1907 range(0, 100) \ 1908 \ 1909 product(uintx, CMSBootstrapOccupancy, 50, \ 1910 "Percentage CMS generation occupancy at which to " \ 1911 "initiate CMS collection for bootstrapping collection stats") \ 1912 range(0, 100) \ 1913 \ 1914 product(intx, CMSInitiatingOccupancyFraction, -1, \ 1915 "Percentage CMS generation occupancy to start a CMS collection " \ 1916 "cycle. A negative value means that CMSTriggerRatio is used") \ 1917 range(min_intx, 100) \ 1918 \ 1919 product(uintx, InitiatingHeapOccupancyPercent, 45, \ 1920 "The percent occupancy (IHOP) of the current old generation " \ 1921 "capacity above which a concurrent mark cycle will be initiated " \ 1922 "Its value may change over time if adaptive IHOP is enabled, " \ 1923 "otherwise the value remains constant. " \ 1924 "In the latter case a value of 0 will result as frequent as " \ 1925 "possible concurrent marking cycles. A value of 100 disables " \ 1926 "concurrent marking. " \ 1927 "Fragmentation waste in the old generation is not considered " \ 1928 "free space in this calculation. 
(G1 collector only)") \ 1929 range(0, 100) \ 1930 \ 1931 manageable(intx, CMSTriggerInterval, -1, \ 1932 "Commence a CMS collection cycle (at least) every so many " \ 1933 "milliseconds (0 permanently, -1 disabled)") \ 1934 range(-1, max_intx) \ 1935 \ 1936 product(bool, UseCMSInitiatingOccupancyOnly, false, \ 1937 "Only use occupancy as a criterion for starting a CMS collection")\ 1938 \ 1939 product(uintx, CMSIsTooFullPercentage, 98, \ 1940 "An absolute ceiling above which CMS will always consider the " \ 1941 "unloading of classes when class unloading is enabled") \ 1942 range(0, 100) \ 1943 \ 1944 develop(bool, CMSTestInFreeList, false, \ 1945 "Check if the coalesced range is already in the " \ 1946 "free lists as claimed") \ 1947 \ 1948 notproduct(bool, CMSVerifyReturnedBytes, false, \ 1949 "Check that all the garbage collected was returned to the " \ 1950 "free lists") \ 1951 \ 1952 notproduct(bool, ScavengeALot, false, \ 1953 "Force scavenge at every Nth exit from the runtime system " \ 1954 "(N=ScavengeALotInterval)") \ 1955 \ 1956 develop(bool, FullGCALot, false, \ 1957 "Force full gc at every Nth exit from the runtime system " \ 1958 "(N=FullGCALotInterval)") \ 1959 \ 1960 notproduct(bool, GCALotAtAllSafepoints, false, \ 1961 "Enforce ScavengeALot/GCALot at all potential safepoints") \ 1962 \ 1963 notproduct(bool, PromotionFailureALot, false, \ 1964 "Use promotion failure handling on every youngest generation " \ 1965 "collection") \ 1966 \ 1967 develop(uintx, PromotionFailureALotCount, 1000, \ 1968 "Number of promotion failures occurring at PLAB " \ 1969 "refill attempts (ParNew) or promotion attempts " \ 1970 "(other young collectors)") \ 1971 \ 1972 develop(uintx, PromotionFailureALotInterval, 5, \ 1973 "Total collections between promotion failures a lot") \ 1974 \ 1975 experimental(uintx, WorkStealingSleepMillis, 1, \ 1976 "Sleep time when sleep is used for yields") \ 1977 \ 1978 experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ 1979 "Number 
of yields before a sleep is done during work stealing") \ 1980 \ 1981 experimental(uintx, WorkStealingHardSpins, 4096, \ 1982 "Number of iterations in a spin loop between checks on " \ 1983 "time out of hard spin") \ 1984 \ 1985 experimental(uintx, WorkStealingSpinToYieldRatio, 10, \ 1986 "Ratio of hard spins to calls to yield") \ 1987 \ 1988 develop(uintx, ObjArrayMarkingStride, 512, \ 1989 "Number of object array elements to push onto the marking stack " \ 1990 "before pushing a continuation entry") \ 1991 \ 1992 develop(bool, MetadataAllocationFailALot, false, \ 1993 "Fail metadata allocations at intervals controlled by " \ 1994 "MetadataAllocationFailALotInterval") \ 1995 \ 1996 develop(uintx, MetadataAllocationFailALotInterval, 1000, \ 1997 "Metadata allocation failure a lot interval") \ 1998 \ 1999 notproduct(bool, ExecuteInternalVMTests, false, \ 2000 "Enable execution of internal VM tests") \ 2001 \ 2002 notproduct(bool, VerboseInternalVMTests, false, \ 2003 "Turn on logging for internal VM tests.") \ 2004 \ 2005 product(bool, ExecutingUnitTests, false, \ 2006 "Whether the JVM is running unit tests or not") \ 2007 \ 2008 product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 2009 \ 2010 product_pd(bool, ResizeTLAB, \ 2011 "Dynamically resize TLAB size for threads") \ 2012 \ 2013 product(bool, ZeroTLAB, false, \ 2014 "Zero out the newly created TLAB") \ 2015 \ 2016 product(bool, FastTLABRefill, true, \ 2017 "Use fast TLAB refill code") \ 2018 \ 2019 product(bool, TLABStats, true, \ 2020 "Provide more detailed and expensive TLAB statistics.") \ 2021 \ 2022 product_pd(bool, NeverActAsServerClassMachine, \ 2023 "Never act like a server-class machine") \ 2024 \ 2025 product(bool, AlwaysActAsServerClassMachine, false, \ 2026 "Always act like a server-class machine") \ 2027 \ 2028 product_pd(uint64_t, MaxRAM, \ 2029 "Real memory size (in bytes) used to set maximum heap size") \ 2030 range(0, 0XFFFFFFFFFFFFFFFF) \ 2031 \ 2032 product(size_t, 
ErgoHeapSizeLimit, 0, \ 2033 "Maximum ergonomically set heap size (in bytes); zero means use " \ 2034 "MaxRAM / MaxRAMFraction") \ 2035 range(0, max_uintx) \ 2036 \ 2037 product(uintx, MaxRAMFraction, 4, \ 2038 "Maximum fraction (1/n) of real memory used for maximum heap " \ 2039 "size") \ 2040 range(1, max_uintx) \ 2041 \ 2042 product(uintx, MinRAMFraction, 2, \ 2043 "Minimum fraction (1/n) of real memory used for maximum heap " \ 2044 "size on systems with small physical memory size") \ 2045 range(1, max_uintx) \ 2046 \ 2047 product(uintx, InitialRAMFraction, 64, \ 2048 "Fraction (1/n) of real memory used for initial heap size") \ 2049 range(1, max_uintx) \ 2050 \ 2051 develop(uintx, MaxVirtMemFraction, 2, \ 2052 "Maximum fraction (1/n) of virtual memory used for ergonomically "\ 2053 "determining maximum heap size") \ 2054 \ 2055 product(bool, UseAutoGCSelectPolicy, false, \ 2056 "Use automatic collection selection policy") \ 2057 \ 2058 product(uintx, AutoGCSelectPauseMillis, 5000, \ 2059 "Automatic GC selection pause threshold in milliseconds") \ 2060 range(0, max_uintx) \ 2061 \ 2062 product(bool, UseAdaptiveSizePolicy, true, \ 2063 "Use adaptive generation sizing policies") \ 2064 \ 2065 product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \ 2066 "Use adaptive survivor sizing policies") \ 2067 \ 2068 product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \ 2069 "Use adaptive young-old sizing policies at minor collections") \ 2070 \ 2071 product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \ 2072 "Use adaptive young-old sizing policies at major collections") \ 2073 \ 2074 product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 2075 "Include statistics from System.gc() for adaptive size policy") \ 2076 \ 2077 product(bool, UseAdaptiveGCBoundary, false, \ 2078 "Allow young-old boundary to move") \ 2079 \ 2080 develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \ 2081 "Resize the virtual spaces of the young or old 
generations") \ 2082 range(-1, 1) \ 2083 \ 2084 product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 2085 "Policy for changing generation size for throughput goals") \ 2086 range(0, 1) \ 2087 \ 2088 product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 2089 "Number of steps where heuristics is used before data is used") \ 2090 range(0, max_uintx) \ 2091 \ 2092 develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \ 2093 "Number of collections before the adaptive sizing is started") \ 2094 \ 2095 product(uintx, AdaptiveSizePolicyOutputInterval, 0, \ 2096 "Collection interval for printing information; zero means never") \ 2097 range(0, max_uintx) \ 2098 \ 2099 product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \ 2100 "Use adaptive minimum footprint as a goal") \ 2101 \ 2102 product(uintx, AdaptiveSizePolicyWeight, 10, \ 2103 "Weight given to exponential resizing, between 0 and 100") \ 2104 range(0, 100) \ 2105 \ 2106 product(uintx, AdaptiveTimeWeight, 25, \ 2107 "Weight given to time in adaptive policy, between 0 and 100") \ 2108 range(0, 100) \ 2109 \ 2110 product(uintx, PausePadding, 1, \ 2111 "How much buffer to keep for pause time") \ 2112 range(0, max_juint) \ 2113 \ 2114 product(uintx, PromotedPadding, 3, \ 2115 "How much buffer to keep for promotion failure") \ 2116 range(0, max_juint) \ 2117 \ 2118 product(uintx, SurvivorPadding, 3, \ 2119 "How much buffer to keep for survivor overflow") \ 2120 range(0, max_juint) \ 2121 \ 2122 product(uintx, ThresholdTolerance, 10, \ 2123 "Allowed collection cost difference between generations") \ 2124 range(0, 100) \ 2125 \ 2126 product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \ 2127 "If collection costs are within margin, reduce both by full " \ 2128 "delta") \ 2129 range(0, 100) \ 2130 \ 2131 product(uintx, YoungGenerationSizeIncrement, 20, \ 2132 "Adaptive size percentage change in young generation") \ 2133 range(0, 100) \ 2134 \ 2135 product(uintx, YoungGenerationSizeSupplement, 80, \ 2136 "Supplement to 
YoungedGenerationSizeIncrement used at startup") \ 2137 range(0, 100) \ 2138 \ 2139 product(uintx, YoungGenerationSizeSupplementDecay, 8, \ 2140 "Decay factor to YoungedGenerationSizeSupplement") \ 2141 range(1, max_uintx) \ 2142 \ 2143 product(uintx, TenuredGenerationSizeIncrement, 20, \ 2144 "Adaptive size percentage change in tenured generation") \ 2145 range(0, 100) \ 2146 \ 2147 product(uintx, TenuredGenerationSizeSupplement, 80, \ 2148 "Supplement to TenuredGenerationSizeIncrement used at startup") \ 2149 range(0, 100) \ 2150 \ 2151 product(uintx, TenuredGenerationSizeSupplementDecay, 2, \ 2152 "Decay factor to TenuredGenerationSizeIncrement") \ 2153 range(1, max_uintx) \ 2154 \ 2155 product(uintx, MaxGCPauseMillis, max_uintx - 1, \ 2156 "Adaptive size policy maximum GC pause time goal in millisecond, "\ 2157 "or (G1 Only) the maximum GC time per MMU time slice") \ 2158 range(1, max_uintx - 1) \ 2159 constraint(MaxGCPauseMillisConstraintFunc,AfterErgo) \ 2160 \ 2161 product(uintx, GCPauseIntervalMillis, 0, \ 2162 "Time slice for MMU specification") \ 2163 constraint(GCPauseIntervalMillisConstraintFunc,AfterErgo) \ 2164 \ 2165 product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 2166 "Adaptive size policy maximum GC minor pause time goal " \ 2167 "in millisecond") \ 2168 range(0, max_uintx) \ 2169 \ 2170 product(uintx, GCTimeRatio, 99, \ 2171 "Adaptive size policy application time to GC time ratio") \ 2172 range(0, max_juint) \ 2173 \ 2174 product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \ 2175 "Adaptive size scale down factor for shrinking") \ 2176 range(1, max_uintx) \ 2177 \ 2178 product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \ 2179 "Adaptive size decays the major cost for long major intervals") \ 2180 \ 2181 product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \ 2182 "Time scale over which major costs decay") \ 2183 range(0, max_uintx) \ 2184 \ 2185 product(uintx, MinSurvivorRatio, 3, \ 2186 "Minimum ratio of young generation/survivor space size") 
\ 2187 range(3, max_uintx) \ 2188 \ 2189 product(uintx, InitialSurvivorRatio, 8, \ 2190 "Initial ratio of young generation/survivor space size") \ 2191 range(0, max_uintx) \ 2192 \ 2193 product(size_t, BaseFootPrintEstimate, 256*M, \ 2194 "Estimate of footprint other than Java Heap") \ 2195 range(0, max_uintx) \ 2196 \ 2197 product(bool, UseGCOverheadLimit, true, \ 2198 "Use policy to limit of proportion of time spent in GC " \ 2199 "before an OutOfMemory error is thrown") \ 2200 \ 2201 product(uintx, GCTimeLimit, 98, \ 2202 "Limit of the proportion of time spent in GC before " \ 2203 "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ 2204 range(0, 100) \ 2205 \ 2206 product(uintx, GCHeapFreeLimit, 2, \ 2207 "Minimum percentage of free space after a full GC before an " \ 2208 "OutOfMemoryError is thrown (used with GCTimeLimit)") \ 2209 range(0, 100) \ 2210 \ 2211 develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \ 2212 "Number of consecutive collections before gc time limit fires") \ 2213 range(1, max_uintx) \ 2214 \ 2215 product(intx, PrefetchCopyIntervalInBytes, -1, \ 2216 "How far ahead to prefetch destination area (<= 0 means off)") \ 2217 range(-1, max_jint) \ 2218 \ 2219 product(intx, PrefetchScanIntervalInBytes, -1, \ 2220 "How far ahead to prefetch scan area (<= 0 means off)") \ 2221 range(-1, max_jint) \ 2222 \ 2223 product(intx, PrefetchFieldsAhead, -1, \ 2224 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 2225 range(-1, max_jint) \ 2226 \ 2227 diagnostic(bool, VerifyDuringStartup, false, \ 2228 "Verify memory system before executing any Java code " \ 2229 "during VM initialization") \ 2230 \ 2231 diagnostic(bool, VerifyBeforeExit, trueInDebug, \ 2232 "Verify system before exiting") \ 2233 \ 2234 diagnostic(bool, VerifyBeforeGC, false, \ 2235 "Verify memory system before GC") \ 2236 \ 2237 diagnostic(bool, VerifyAfterGC, false, \ 2238 "Verify memory system after GC") \ 2239 \ 2240 diagnostic(bool, VerifyDuringGC, 
false, \ 2241 "Verify memory system during GC (between phases)") \ 2242 \ 2243 diagnostic(ccstrlist, VerifySubSet, "", \ 2244 "Memory sub-systems to verify when Verify*GC flag(s) " \ 2245 "are enabled. One or more sub-systems can be specified " \ 2246 "in a comma separated string. Sub-systems are: " \ 2247 "threads, heap, symbol_table, string_table, codecache, " \ 2248 "dictionary, classloader_data_graph, metaspace, jni_handles, " \ 2249 "codecache_oops") \ 2250 \ 2251 diagnostic(bool, GCParallelVerificationEnabled, true, \ 2252 "Enable parallel memory system verification") \ 2253 \ 2254 diagnostic(bool, DeferInitialCardMark, false, \ 2255 "When +ReduceInitialCardMarks, explicitly defer any that " \ 2256 "may arise from new_pre_store_barrier") \ 2257 \ 2258 product(bool, UseCondCardMark, false, \ 2259 "Check for already marked card before updating card table") \ 2260 \ 2261 diagnostic(bool, VerifyRememberedSets, false, \ 2262 "Verify GC remembered sets") \ 2263 \ 2264 diagnostic(bool, VerifyObjectStartArray, true, \ 2265 "Verify GC object start array if verify before/after") \ 2266 \ 2267 product(bool, DisableExplicitGC, false, \ 2268 "Ignore calls to System.gc()") \ 2269 \ 2270 notproduct(bool, CheckMemoryInitialization, false, \ 2271 "Check memory initialization") \ 2272 \ 2273 diagnostic(bool, BindCMSThreadToCPU, false, \ 2274 "Bind CMS Thread to CPU if possible") \ 2275 \ 2276 diagnostic(uintx, CPUForCMSThread, 0, \ 2277 "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \ 2278 range(0, max_juint) \ 2279 \ 2280 product(bool, BindGCTaskThreadsToCPUs, false, \ 2281 "Bind GCTaskThreads to CPUs if possible") \ 2282 \ 2283 product(bool, UseGCTaskAffinity, false, \ 2284 "Use worker affinity when asking for GCTasks") \ 2285 \ 2286 product(uintx, ProcessDistributionStride, 4, \ 2287 "Stride through processors when distributing processes") \ 2288 range(0, max_juint) \ 2289 \ 2290 product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 2291 "Number of times 
the coordinator GC thread will sleep while " \ 2292 "yielding before giving up and resuming GC") \ 2293 range(0, max_juint) \ 2294 \ 2295 product(uintx, CMSYieldSleepCount, 0, \ 2296 "Number of times a GC thread (minus the coordinator) " \ 2297 "will sleep while yielding before giving up and resuming GC") \ 2298 range(0, max_juint) \ 2299 \ 2300 product(bool, PrintGC, false, \ 2301 "Print message at garbage collection. " \ 2302 "Deprecated, use -Xlog:gc instead.") \ 2303 \ 2304 product(bool, PrintGCDetails, false, \ 2305 "Print more details at garbage collection. " \ 2306 "Deprecated, use -Xlog:gc* instead.") \ 2307 \ 2308 develop(intx, ConcGCYieldTimeout, 0, \ 2309 "If non-zero, assert that GC threads yield within this " \ 2310 "number of milliseconds") \ 2311 range(0, max_intx) \ 2312 \ 2313 develop(bool, TraceFinalizerRegistration, false, \ 2314 "Trace registration of final references") \ 2315 \ 2316 notproduct(bool, TraceScavenge, false, \ 2317 "Trace scavenge") \ 2318 \ 2319 product(bool, IgnoreEmptyClassPaths, false, \ 2320 "Ignore empty path elements in -classpath") \ 2321 \ 2322 product(size_t, InitialBootClassLoaderMetaspaceSize, \ 2323 NOT_LP64(2200*K) LP64_ONLY(4*M), \ 2324 "Initial size of the boot class loader data metaspace") \ 2325 range(30*K, max_uintx/BytesPerWord) \ 2326 constraint(InitialBootClassLoaderMetaspaceSizeConstraintFunc, AfterErgo)\ 2327 \ 2328 product(bool, TraceYoungGenTime, false, \ 2329 "Trace accumulated time for young collection") \ 2330 \ 2331 product(bool, TraceOldGenTime, false, \ 2332 "Trace accumulated time for old collection") \ 2333 \ 2334 product(bool, PrintHeapAtSIGBREAK, true, \ 2335 "Print heap layout in response to SIGBREAK") \ 2336 \ 2337 manageable(bool, PrintClassHistogram, false, \ 2338 "Print a histogram of class instances") \ 2339 \ 2340 develop(bool, TraceGCTaskManager, false, \ 2341 "Trace actions of the GC task manager") \ 2342 \ 2343 develop(bool, TraceGCTaskQueue, false, \ 2344 "Trace actions of the GC task 
queues") \ 2345 \ 2346 develop(bool, TraceParallelOldGCMarkingPhase, false, \ 2347 "Trace marking phase in ParallelOldGC") \ 2348 \ 2349 develop(bool, TraceParallelOldGCDensePrefix, false, \ 2350 "Trace dense prefix computation for ParallelOldGC") \ 2351 \ 2352 develop(bool, IgnoreLibthreadGPFault, false, \ 2353 "Suppress workaround for libthread GP fault") \ 2354 \ 2355 experimental(double, ObjectCountCutOffPercent, 0.5, \ 2356 "The percentage of the used heap that the instances of a class " \ 2357 "must occupy for the class to generate a trace event") \ 2358 range(0.0, 100.0) \ 2359 \ 2360 /* JVMTI heap profiling */ \ 2361 \ 2362 diagnostic(bool, TraceJVMTIObjectTagging, false, \ 2363 "Trace JVMTI object tagging calls") \ 2364 \ 2365 diagnostic(bool, VerifyBeforeIteration, false, \ 2366 "Verify memory system before JVMTI iteration") \ 2367 \ 2368 /* compiler interface */ \ 2369 \ 2370 develop(bool, CIPrintCompilerName, false, \ 2371 "when CIPrint is active, print the name of the active compiler") \ 2372 \ 2373 diagnostic(bool, CIPrintCompileQueue, false, \ 2374 "display the contents of the compile queue whenever a " \ 2375 "compilation is enqueued") \ 2376 \ 2377 develop(bool, CIPrintRequests, false, \ 2378 "display every request for compilation") \ 2379 \ 2380 product(bool, CITime, false, \ 2381 "collect timing information for compilation") \ 2382 \ 2383 develop(bool, CITimeVerbose, false, \ 2384 "be more verbose in compilation timings") \ 2385 \ 2386 develop(bool, CITimeEach, false, \ 2387 "display timing information after each successful compilation") \ 2388 \ 2389 develop(bool, CICountOSR, false, \ 2390 "use a separate counter when assigning ids to osr compilations") \ 2391 \ 2392 develop(bool, CICompileNatives, true, \ 2393 "compile native methods if supported by the compiler") \ 2394 \ 2395 develop_pd(bool, CICompileOSR, \ 2396 "compile on stack replacement methods if supported by the " \ 2397 "compiler") \ 2398 \ 2399 develop(bool, CIPrintMethodCodes, 
false, \ 2400 "print method bytecodes of the compiled code") \ 2401 \ 2402 develop(bool, CIPrintTypeFlow, false, \ 2403 "print the results of ciTypeFlow analysis") \ 2404 \ 2405 develop(bool, CITraceTypeFlow, false, \ 2406 "detailed per-bytecode tracing of ciTypeFlow analysis") \ 2407 \ 2408 develop(intx, OSROnlyBCI, -1, \ 2409 "OSR only at this bci. Negative values mean exclude that bci") \ 2410 \ 2411 /* compiler */ \ 2412 \ 2413 /* notice: the max range value here is max_jint, not max_intx */ \ 2414 /* because of overflow issue */ \ 2415 product(intx, CICompilerCount, CI_COMPILER_COUNT, \ 2416 "Number of compiler threads to run") \ 2417 range(0, max_jint) \ 2418 constraint(CICompilerCountConstraintFunc, AfterErgo) \ 2419 \ 2420 product(intx, CompilationPolicyChoice, 0, \ 2421 "which compilation policy (0-3)") \ 2422 range(0, 3) \ 2423 \ 2424 develop(bool, UseStackBanging, true, \ 2425 "use stack banging for stack overflow checks (required for " \ 2426 "proper StackOverflow handling; disable only to measure cost " \ 2427 "of stackbanging)") \ 2428 \ 2429 develop(bool, UseStrictFP, true, \ 2430 "use strict fp if modifier strictfp is set") \ 2431 \ 2432 develop(bool, GenerateSynchronizationCode, true, \ 2433 "generate locking/unlocking code for synchronized methods and " \ 2434 "monitors") \ 2435 \ 2436 develop(bool, GenerateRangeChecks, true, \ 2437 "Generate range checks for array accesses") \ 2438 \ 2439 diagnostic_pd(bool, ImplicitNullChecks, \ 2440 "Generate code for implicit null checks") \ 2441 \ 2442 product_pd(bool, TrapBasedNullChecks, \ 2443 "Generate code for null checks that uses a cmp and trap " \ 2444 "instruction raising SIGTRAP. 
This is only used if an access to" \ 2445 "null (+offset) will not raise a SIGSEGV, i.e.," \ 2446 "ImplicitNullChecks don't work (PPC64).") \ 2447 \ 2448 product(bool, PrintSafepointStatistics, false, \ 2449 "Print statistics about safepoint synchronization") \ 2450 \ 2451 product(intx, PrintSafepointStatisticsCount, 300, \ 2452 "Total number of safepoint statistics collected " \ 2453 "before printing them out") \ 2454 range(1, max_intx) \ 2455 \ 2456 product(intx, PrintSafepointStatisticsTimeout, -1, \ 2457 "Print safepoint statistics only when safepoint takes " \ 2458 "more than PrintSafepointSatisticsTimeout in millis") \ 2459 LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ 2460 NOT_LP64(range(-1, max_intx)) \ 2461 \ 2462 product(bool, Inline, true, \ 2463 "Enable inlining") \ 2464 \ 2465 product(bool, ClipInlining, true, \ 2466 "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ 2467 \ 2468 develop(bool, UseCHA, true, \ 2469 "Enable CHA") \ 2470 \ 2471 product(bool, UseTypeProfile, true, \ 2472 "Check interpreter profile for historically monomorphic calls") \ 2473 \ 2474 diagnostic(bool, PrintInlining, false, \ 2475 "Print inlining optimizations") \ 2476 \ 2477 product(bool, UsePopCountInstruction, false, \ 2478 "Use population count instruction") \ 2479 \ 2480 develop(bool, EagerInitialization, false, \ 2481 "Eagerly initialize classes if possible") \ 2482 \ 2483 diagnostic(bool, LogTouchedMethods, false, \ 2484 "Log methods which have been ever touched in runtime") \ 2485 \ 2486 diagnostic(bool, PrintTouchedMethodsAtExit, false, \ 2487 "Print all methods that have been ever touched in runtime") \ 2488 \ 2489 develop(bool, TraceMethodReplacement, false, \ 2490 "Print when methods are replaced do to recompilation") \ 2491 \ 2492 develop(bool, PrintMethodFlushing, false, \ 2493 "Print the nmethods being flushed") \ 2494 \ 2495 diagnostic(bool, PrintMethodFlushingStatistics, false, \ 2496 "print statistics about method flushing") \ 2497 \ 2498 
diagnostic(intx, HotMethodDetectionLimit, 100000, \ 2499 "Number of compiled code invocations after which " \ 2500 "the method is considered as hot by the flusher") \ 2501 range(1, max_jint) \ 2502 \ 2503 diagnostic(intx, MinPassesBeforeFlush, 10, \ 2504 "Minimum number of sweeper passes before an nmethod " \ 2505 "can be flushed") \ 2506 range(0, max_intx) \ 2507 \ 2508 product(bool, UseCodeAging, true, \ 2509 "Insert counter to detect warm methods") \ 2510 \ 2511 diagnostic(bool, StressCodeAging, false, \ 2512 "Start with counters compiled in") \ 2513 \ 2514 develop(bool, UseRelocIndex, false, \ 2515 "Use an index to speed random access to relocations") \ 2516 \ 2517 develop(bool, StressCodeBuffers, false, \ 2518 "Exercise code buffer expansion and other rare state changes") \ 2519 \ 2520 diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 2521 "Generate extra debugging information for non-safepoints in " \ 2522 "nmethods") \ 2523 \ 2524 product(bool, PrintVMOptions, false, \ 2525 "Print flags that appeared on the command line") \ 2526 \ 2527 product(bool, IgnoreUnrecognizedVMOptions, false, \ 2528 "Ignore unrecognized VM options") \ 2529 \ 2530 product(bool, PrintCommandLineFlags, false, \ 2531 "Print flags specified on command line or set by ergonomics") \ 2532 \ 2533 product(bool, PrintFlagsInitial, false, \ 2534 "Print all VM flags before argument processing and exit VM") \ 2535 \ 2536 product(bool, PrintFlagsFinal, false, \ 2537 "Print all VM flags after argument and ergonomic processing") \ 2538 \ 2539 notproduct(bool, PrintFlagsWithComments, false, \ 2540 "Print all VM flags with default values and descriptions and " \ 2541 "exit") \ 2542 \ 2543 product(bool, PrintFlagsRanges, false, \ 2544 "Print VM flags and their ranges and exit VM") \ 2545 \ 2546 diagnostic(bool, SerializeVMOutput, true, \ 2547 "Use a mutex to serialize output to tty and LogFile") \ 2548 \ 2549 diagnostic(bool, DisplayVMOutput, true, \ 2550 "Display all VM output on the tty, 
independently of LogVMOutput") \ 2551 \ 2552 diagnostic(bool, LogVMOutput, false, \ 2553 "Save VM output to LogFile") \ 2554 \ 2555 diagnostic(ccstr, LogFile, NULL, \ 2556 "If LogVMOutput or LogCompilation is on, save VM output to " \ 2557 "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ 2558 \ 2559 product(ccstr, ErrorFile, NULL, \ 2560 "If an error occurs, save the error data to this file " \ 2561 "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 2562 \ 2563 product(bool, DisplayVMOutputToStderr, false, \ 2564 "If DisplayVMOutput is true, display all VM output to stderr") \ 2565 \ 2566 product(bool, DisplayVMOutputToStdout, false, \ 2567 "If DisplayVMOutput is true, display all VM output to stdout") \ 2568 \ 2569 product(bool, UseHeavyMonitors, false, \ 2570 "use heavyweight instead of lightweight Java monitors") \ 2571 \ 2572 product(bool, PrintStringTableStatistics, false, \ 2573 "print statistics about the StringTable and SymbolTable") \ 2574 \ 2575 diagnostic(bool, VerifyStringTableAtExit, false, \ 2576 "verify StringTable contents at exit") \ 2577 \ 2578 notproduct(bool, PrintSymbolTableSizeHistogram, false, \ 2579 "print histogram of the symbol table") \ 2580 \ 2581 notproduct(bool, ExitVMOnVerifyError, false, \ 2582 "standard exit from VM if bytecode verify error " \ 2583 "(only in debug mode)") \ 2584 \ 2585 diagnostic(ccstr, AbortVMOnException, NULL, \ 2586 "Call fatal if this exception is thrown. 
Example: " \ 2587 "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ 2588 \ 2589 diagnostic(ccstr, AbortVMOnExceptionMessage, NULL, \ 2590 "Call fatal if the exception pointed by AbortVMOnException " \ 2591 "has this message") \ 2592 \ 2593 develop(bool, DebugVtables, false, \ 2594 "add debugging code to vtable dispatch") \ 2595 \ 2596 notproduct(bool, PrintVtableStats, false, \ 2597 "print vtables stats at end of run") \ 2598 \ 2599 develop(bool, TraceCreateZombies, false, \ 2600 "trace creation of zombie nmethods") \ 2601 \ 2602 notproduct(bool, IgnoreLockingAssertions, false, \ 2603 "disable locking assertions (for speed)") \ 2604 \ 2605 product(bool, RangeCheckElimination, true, \ 2606 "Eliminate range checks") \ 2607 \ 2608 develop_pd(bool, UncommonNullCast, \ 2609 "track occurrences of null in casts; adjust compiler tactics") \ 2610 \ 2611 develop(bool, TypeProfileCasts, true, \ 2612 "treat casts like calls for purposes of type profiling") \ 2613 \ 2614 develop(bool, DelayCompilationDuringStartup, true, \ 2615 "Delay invoking the compiler until main application class is " \ 2616 "loaded") \ 2617 \ 2618 develop(bool, CompileTheWorld, false, \ 2619 "Compile all methods in all classes in bootstrap class path " \ 2620 "(stress test)") \ 2621 \ 2622 develop(bool, CompileTheWorldPreloadClasses, true, \ 2623 "Preload all classes used by a class before start loading") \ 2624 \ 2625 notproduct(intx, CompileTheWorldSafepointInterval, 100, \ 2626 "Force a safepoint every n compiles so sweeper can keep up") \ 2627 \ 2628 develop(bool, FillDelaySlots, true, \ 2629 "Fill delay slots (on SPARC only)") \ 2630 \ 2631 develop(bool, TimeLivenessAnalysis, false, \ 2632 "Time computation of bytecode liveness analysis") \ 2633 \ 2634 develop(bool, TraceLivenessGen, false, \ 2635 "Trace the generation of liveness analysis information") \ 2636 \ 2637 notproduct(bool, TraceLivenessQuery, false, \ 2638 "Trace queries of liveness analysis information") \ 2639 \ 2640 
notproduct(bool, CollectIndexSetStatistics, false, \ 2641 "Collect information about IndexSets") \ 2642 \ 2643 develop(bool, UseLoopSafepoints, true, \ 2644 "Generate Safepoint nodes in every loop") \ 2645 \ 2646 develop(intx, FastAllocateSizeLimit, 128*K, \ 2647 /* Note: This value is zero mod 1<<13 for a cheap sparc set. */ \ 2648 "Inline allocations larger than this in doublewords must go slow")\ 2649 \ 2650 product(bool, AggressiveOpts, false, \ 2651 "Enable aggressive optimizations - see arguments.cpp") \ 2652 \ 2653 product_pd(bool, CompactStrings, \ 2654 "Enable Strings to use single byte chars in backing store") \ 2655 \ 2656 product_pd(uintx, TypeProfileLevel, \ 2657 "=XYZ, with Z: Type profiling of arguments at call; " \ 2658 "Y: Type profiling of return value at call; " \ 2659 "X: Type profiling of parameters to methods; " \ 2660 "X, Y and Z in 0=off ; 1=jsr292 only; 2=all methods") \ 2661 constraint(TypeProfileLevelConstraintFunc, AfterErgo) \ 2662 \ 2663 product(intx, TypeProfileArgsLimit, 2, \ 2664 "max number of call arguments to consider for type profiling") \ 2665 range(0, 16) \ 2666 \ 2667 product(intx, TypeProfileParmsLimit, 2, \ 2668 "max number of incoming parameters to consider for type profiling"\ 2669 ", -1 for all") \ 2670 range(-1, 64) \ 2671 \ 2672 /* statistics */ \ 2673 develop(bool, CountCompiledCalls, false, \ 2674 "Count method invocations") \ 2675 \ 2676 notproduct(bool, CountRuntimeCalls, false, \ 2677 "Count VM runtime calls") \ 2678 \ 2679 develop(bool, CountJNICalls, false, \ 2680 "Count jni method invocations") \ 2681 \ 2682 notproduct(bool, CountJVMCalls, false, \ 2683 "Count jvm method invocations") \ 2684 \ 2685 notproduct(bool, CountRemovableExceptions, false, \ 2686 "Count exceptions that could be replaced by branches due to " \ 2687 "inlining") \ 2688 \ 2689 notproduct(bool, ICMissHistogram, false, \ 2690 "Produce histogram of IC misses") \ 2691 \ 2692 /* interpreter */ \ 2693 product_pd(bool, RewriteBytecodes, \ 2694 
"Allow rewriting of bytecodes (bytecodes are not immutable)") \ 2695 \ 2696 product_pd(bool, RewriteFrequentPairs, \ 2697 "Rewrite frequently used bytecode pairs into a single bytecode") \ 2698 \ 2699 diagnostic(bool, PrintInterpreter, false, \ 2700 "Print the generated interpreter code") \ 2701 \ 2702 product(bool, UseInterpreter, true, \ 2703 "Use interpreter for non-compiled methods") \ 2704 \ 2705 develop(bool, UseFastSignatureHandlers, true, \ 2706 "Use fast signature handlers for native calls") \ 2707 \ 2708 product(bool, UseLoopCounter, true, \ 2709 "Increment invocation counter on backward branch") \ 2710 \ 2711 product_pd(bool, UseOnStackReplacement, \ 2712 "Use on stack replacement, calls runtime if invoc. counter " \ 2713 "overflows in loop") \ 2714 \ 2715 notproduct(bool, TraceOnStackReplacement, false, \ 2716 "Trace on stack replacement") \ 2717 \ 2718 product_pd(bool, PreferInterpreterNativeStubs, \ 2719 "Use always interpreter stubs for native methods invoked via " \ 2720 "interpreter") \ 2721 \ 2722 develop(bool, CountBytecodes, false, \ 2723 "Count number of bytecodes executed") \ 2724 \ 2725 develop(bool, PrintBytecodeHistogram, false, \ 2726 "Print histogram of the executed bytecodes") \ 2727 \ 2728 develop(bool, PrintBytecodePairHistogram, false, \ 2729 "Print histogram of the executed bytecode pairs") \ 2730 \ 2731 diagnostic(bool, PrintSignatureHandlers, false, \ 2732 "Print code generated for native method signature handlers") \ 2733 \ 2734 develop(bool, VerifyOops, false, \ 2735 "Do plausibility checks for oops") \ 2736 \ 2737 develop(bool, CheckUnhandledOops, false, \ 2738 "Check for unhandled oops in VM code") \ 2739 \ 2740 develop(bool, VerifyJNIFields, trueInDebug, \ 2741 "Verify jfieldIDs for instance fields") \ 2742 \ 2743 notproduct(bool, VerifyJNIEnvThread, false, \ 2744 "Verify JNIEnv.thread == Thread::current() when entering VM " \ 2745 "from JNI") \ 2746 \ 2747 develop(bool, VerifyFPU, false, \ 2748 "Verify FPU state (check for 
NaN's, etc.)") \ 2749 \ 2750 develop(bool, VerifyThread, false, \ 2751 "Watch the thread register for corruption (SPARC only)") \ 2752 \ 2753 develop(bool, VerifyActivationFrameSize, false, \ 2754 "Verify that activation frame didn't become smaller than its " \ 2755 "minimal size") \ 2756 \ 2757 develop(bool, TraceFrequencyInlining, false, \ 2758 "Trace frequency based inlining") \ 2759 \ 2760 develop_pd(bool, InlineIntrinsics, \ 2761 "Inline intrinsics that can be statically resolved") \ 2762 \ 2763 product_pd(bool, ProfileInterpreter, \ 2764 "Profile at the bytecode level during interpretation") \ 2765 \ 2766 develop(bool, TraceProfileInterpreter, false, \ 2767 "Trace profiling at the bytecode level during interpretation. " \ 2768 "This outputs the profiling information collected to improve " \ 2769 "jit compilation.") \ 2770 \ 2771 develop_pd(bool, ProfileTraps, \ 2772 "Profile deoptimization traps at the bytecode level") \ 2773 \ 2774 product(intx, ProfileMaturityPercentage, 20, \ 2775 "number of method invocations/branches (expressed as % of " \ 2776 "CompileThreshold) before using the method's profile") \ 2777 range(0, 100) \ 2778 \ 2779 diagnostic(bool, PrintMethodData, false, \ 2780 "Print the results of +ProfileInterpreter at end of run") \ 2781 \ 2782 develop(bool, VerifyDataPointer, trueInDebug, \ 2783 "Verify the method data pointer during interpreter profiling") \ 2784 \ 2785 develop(bool, VerifyCompiledCode, false, \ 2786 "Include miscellaneous runtime verifications in nmethod code; " \ 2787 "default off because it disturbs nmethod size heuristics") \ 2788 \ 2789 notproduct(bool, CrashGCForDumpingJavaThread, false, \ 2790 "Manually make GC thread crash then dump java stack trace; " \ 2791 "Test only") \ 2792 \ 2793 /* compilation */ \ 2794 product(bool, UseCompiler, true, \ 2795 "Use Just-In-Time compilation") \ 2796 \ 2797 develop(bool, TraceCompilationPolicy, false, \ 2798 "Trace compilation policy") \ 2799 \ 2800 develop(bool, 
TimeCompilationPolicy, false, \ 2801 "Time the compilation policy") \ 2802 \ 2803 product(bool, UseCounterDecay, true, \ 2804 "Adjust recompilation counters") \ 2805 \ 2806 develop(intx, CounterHalfLifeTime, 30, \ 2807 "Half-life time of invocation counters (in seconds)") \ 2808 \ 2809 develop(intx, CounterDecayMinIntervalLength, 500, \ 2810 "The minimum interval (in milliseconds) between invocation of " \ 2811 "CounterDecay") \ 2812 \ 2813 product(bool, AlwaysCompileLoopMethods, false, \ 2814 "When using recompilation, never interpret methods " \ 2815 "containing loops") \ 2816 \ 2817 product(bool, DontCompileHugeMethods, true, \ 2818 "Do not compile methods > HugeMethodLimit") \ 2819 \ 2820 /* Bytecode escape analysis estimation. */ \ 2821 product(bool, EstimateArgEscape, true, \ 2822 "Analyze bytecodes to estimate escape state of arguments") \ 2823 \ 2824 product(intx, BCEATraceLevel, 0, \ 2825 "How much tracing to do of bytecode escape analysis estimates " \ 2826 "(0-3)") \ 2827 range(0, 3) \ 2828 \ 2829 product(intx, MaxBCEAEstimateLevel, 5, \ 2830 "Maximum number of nested calls that are analyzed by BC EA") \ 2831 range(0, max_jint) \ 2832 \ 2833 product(intx, MaxBCEAEstimateSize, 150, \ 2834 "Maximum bytecode size of a method to be analyzed by BC EA") \ 2835 range(0, max_jint) \ 2836 \ 2837 product(intx, AllocatePrefetchStyle, 1, \ 2838 "0 = no prefetch, " \ 2839 "1 = generate prefetch instructions for each allocation, " \ 2840 "2 = use TLAB watermark to gate allocation prefetch, " \ 2841 "3 = generate one prefetch instruction per cache line") \ 2842 range(0, 3) \ 2843 \ 2844 product(intx, AllocatePrefetchDistance, -1, \ 2845 "Distance to prefetch ahead of allocation pointer. 
" \ 2846 "-1: use system-specific value (automatically determined") \ 2847 constraint(AllocatePrefetchDistanceConstraintFunc, AfterMemoryInit)\ 2848 \ 2849 product(intx, AllocatePrefetchLines, 3, \ 2850 "Number of lines to prefetch ahead of array allocation pointer") \ 2851 range(1, 64) \ 2852 \ 2853 product(intx, AllocateInstancePrefetchLines, 1, \ 2854 "Number of lines to prefetch ahead of instance allocation " \ 2855 "pointer") \ 2856 range(1, 64) \ 2857 \ 2858 product(intx, AllocatePrefetchStepSize, 16, \ 2859 "Step size in bytes of sequential prefetch instructions") \ 2860 range(1, 512) \ 2861 constraint(AllocatePrefetchStepSizeConstraintFunc,AfterMemoryInit)\ 2862 \ 2863 product(intx, AllocatePrefetchInstr, 0, \ 2864 "Select instruction to prefetch ahead of allocation pointer") \ 2865 constraint(AllocatePrefetchInstrConstraintFunc, AfterMemoryInit) \ 2866 \ 2867 /* deoptimization */ \ 2868 develop(bool, TraceDeoptimization, false, \ 2869 "Trace deoptimization") \ 2870 \ 2871 develop(bool, PrintDeoptimizationDetails, false, \ 2872 "Print more information about deoptimization") \ 2873 \ 2874 develop(bool, DebugDeoptimization, false, \ 2875 "Tracing various information while debugging deoptimization") \ 2876 \ 2877 product(intx, SelfDestructTimer, 0, \ 2878 "Will cause VM to terminate after a given time (in minutes) " \ 2879 "(0 means off)") \ 2880 range(0, max_intx) \ 2881 \ 2882 product(intx, MaxJavaStackTraceDepth, 1024, \ 2883 "The maximum number of lines in the stack trace for Java " \ 2884 "exceptions (0 means all)") \ 2885 range(0, max_jint/2) \ 2886 \ 2887 develop(bool, TraceStackWalk, false, \ 2888 "Trace stack walking") \ 2889 \ 2890 /* notice: the max range value here is max_jint, not max_intx */ \ 2891 /* because of overflow issue */ \ 2892 diagnostic(intx, GuaranteedSafepointInterval, 1000, \ 2893 "Guarantee a safepoint (at least) every so many milliseconds " \ 2894 "(0 means none)") \ 2895 range(0, max_jint) \ 2896 \ 2897 product(intx, 
SafepointTimeoutDelay, 10000, \ 2898 "Delay in milliseconds for option SafepointTimeout") \ 2899 LP64_ONLY(range(0, max_intx/MICROUNITS)) \ 2900 NOT_LP64(range(0, max_intx)) \ 2901 \ 2902 product(intx, NmethodSweepActivity, 10, \ 2903 "Removes cold nmethods from code cache if > 0. Higher values " \ 2904 "result in more aggressive sweeping") \ 2905 range(0, 2000) \ 2906 \ 2907 notproduct(bool, LogSweeper, false, \ 2908 "Keep a ring buffer of sweeper activity") \ 2909 \ 2910 notproduct(intx, SweeperLogEntries, 1024, \ 2911 "Number of records in the ring buffer of sweeper activity") \ 2912 \ 2913 notproduct(intx, MemProfilingInterval, 500, \ 2914 "Time between each invocation of the MemProfiler") \ 2915 \ 2916 develop(intx, MallocCatchPtr, -1, \ 2917 "Hit breakpoint when mallocing/freeing this pointer") \ 2918 \ 2919 notproduct(ccstrlist, SuppressErrorAt, "", \ 2920 "List of assertions (file:line) to muzzle") \ 2921 \ 2922 develop(intx, StackPrintLimit, 100, \ 2923 "number of stack frames to print in VM-level stack dump") \ 2924 \ 2925 notproduct(intx, MaxElementPrintSize, 256, \ 2926 "maximum number of elements to print") \ 2927 \ 2928 notproduct(intx, MaxSubklassPrintSize, 4, \ 2929 "maximum number of subklasses to print when printing klass") \ 2930 \ 2931 product(intx, MaxInlineLevel, 9, \ 2932 "maximum number of nested calls that are inlined") \ 2933 range(0, max_jint) \ 2934 \ 2935 product(intx, MaxRecursiveInlineLevel, 1, \ 2936 "maximum number of nested recursive calls that are inlined") \ 2937 range(0, max_jint) \ 2938 \ 2939 develop(intx, MaxForceInlineLevel, 100, \ 2940 "maximum number of nested calls that are forced for inlining " \ 2941 "(using CompileCommand or marked w/ @ForceInline)") \ 2942 range(0, max_jint) \ 2943 \ 2944 product_pd(intx, InlineSmallCode, \ 2945 "Only inline already compiled methods if their code size is " \ 2946 "less than this") \ 2947 range(0, max_jint) \ 2948 \ 2949 product(intx, MaxInlineSize, 35, \ 2950 "The maximum bytecode 
size of a method to be inlined") \ 2951 range(0, max_jint) \ 2952 \ 2953 product_pd(intx, FreqInlineSize, \ 2954 "The maximum bytecode size of a frequent method to be inlined") \ 2955 range(0, max_jint) \ 2956 \ 2957 product(intx, MaxTrivialSize, 6, \ 2958 "The maximum bytecode size of a trivial method to be inlined") \ 2959 range(0, max_jint) \ 2960 \ 2961 product(intx, MinInliningThreshold, 250, \ 2962 "The minimum invocation count a method needs to have to be " \ 2963 "inlined") \ 2964 range(0, max_jint) \ 2965 \ 2966 develop(intx, MethodHistogramCutoff, 100, \ 2967 "The cutoff value for method invocation histogram (+CountCalls)") \ 2968 \ 2969 diagnostic(intx, ProfilerNumberOfInterpretedMethods, 25, \ 2970 "Number of interpreted methods to show in profile") \ 2971 \ 2972 diagnostic(intx, ProfilerNumberOfCompiledMethods, 25, \ 2973 "Number of compiled methods to show in profile") \ 2974 \ 2975 diagnostic(intx, ProfilerNumberOfStubMethods, 25, \ 2976 "Number of stub methods to show in profile") \ 2977 \ 2978 diagnostic(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 2979 "Number of runtime stub nodes to show in profile") \ 2980 \ 2981 product(intx, ProfileIntervalsTicks, 100, \ 2982 "Number of ticks between printing of interval profile " \ 2983 "(+ProfileIntervals)") \ 2984 range(0, max_intx) \ 2985 \ 2986 notproduct(intx, ScavengeALotInterval, 1, \ 2987 "Interval between which scavenge will occur with +ScavengeALot") \ 2988 \ 2989 notproduct(intx, FullGCALotInterval, 1, \ 2990 "Interval between which full gc will occur with +FullGCALot") \ 2991 \ 2992 notproduct(intx, FullGCALotStart, 0, \ 2993 "For which invocation to start FullGCAlot") \ 2994 \ 2995 notproduct(intx, FullGCALotDummies, 32*K, \ 2996 "Dummy object allocated with +FullGCALot, forcing all objects " \ 2997 "to move") \ 2998 \ 2999 develop(intx, DontYieldALotInterval, 10, \ 3000 "Interval between which yields will be dropped (milliseconds)") \ 3001 \ 3002 develop(intx, MinSleepInterval, 1, \ 3003 
"Minimum sleep() interval (milliseconds) when " \ 3004 "ConvertSleepToYield is off (used for Solaris)") \ 3005 \ 3006 develop(intx, ProfilerPCTickThreshold, 15, \ 3007 "Number of ticks in a PC buckets to be a hotspot") \ 3008 \ 3009 notproduct(intx, DeoptimizeALotInterval, 5, \ 3010 "Number of exits until DeoptimizeALot kicks in") \ 3011 \ 3012 notproduct(intx, ZombieALotInterval, 5, \ 3013 "Number of exits until ZombieALot kicks in") \ 3014 \ 3015 diagnostic(uintx, MallocMaxTestWords, 0, \ 3016 "If non-zero, maximum number of words that malloc/realloc can " \ 3017 "allocate (for testing only)") \ 3018 range(0, max_uintx) \ 3019 \ 3020 product(intx, TypeProfileWidth, 2, \ 3021 "Number of receiver types to record in call/cast profile") \ 3022 range(0, 8) \ 3023 \ 3024 develop(intx, BciProfileWidth, 2, \ 3025 "Number of return bci's to record in ret profile") \ 3026 \ 3027 product(intx, PerMethodRecompilationCutoff, 400, \ 3028 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 3029 range(-1, max_intx) \ 3030 \ 3031 product(intx, PerBytecodeRecompilationCutoff, 200, \ 3032 "Per-BCI limit on repeated recompilation (-1=>'Inf')") \ 3033 range(-1, max_intx) \ 3034 \ 3035 product(intx, PerMethodTrapLimit, 100, \ 3036 "Limit on traps (of one kind) in a method (includes inlines)") \ 3037 range(0, max_jint) \ 3038 \ 3039 experimental(intx, PerMethodSpecTrapLimit, 5000, \ 3040 "Limit on speculative traps (of one kind) in a method " \ 3041 "(includes inlines)") \ 3042 range(0, max_jint) \ 3043 \ 3044 product(intx, PerBytecodeTrapLimit, 4, \ 3045 "Limit on traps (of one kind) at a particular BCI") \ 3046 range(0, max_jint) \ 3047 \ 3048 experimental(intx, SpecTrapLimitExtraEntries, 3, \ 3049 "Extra method data trap entries for speculation") \ 3050 \ 3051 develop(intx, InlineFrequencyRatio, 20, \ 3052 "Ratio of call site execution to caller method invocation") \ 3053 range(0, max_jint) \ 3054 \ 3055 diagnostic_pd(intx, InlineFrequencyCount, \ 3056 "Count of call 
site execution necessary to trigger frequent " \ 3057 "inlining") \ 3058 range(0, max_jint) \ 3059 \ 3060 develop(intx, InlineThrowCount, 50, \ 3061 "Force inlining of interpreted methods that throw this often") \ 3062 range(0, max_jint) \ 3063 \ 3064 develop(intx, InlineThrowMaxSize, 200, \ 3065 "Force inlining of throwing methods smaller than this") \ 3066 range(0, max_jint) \ 3067 \ 3068 develop(intx, ProfilerNodeSize, 1024, \ 3069 "Size in K to allocate for the Profile Nodes of each thread") \ 3070 range(0, 1024) \ 3071 \ 3072 /* gc parameters */ \ 3073 product(size_t, InitialHeapSize, 0, \ 3074 "Initial heap size (in bytes); zero means use ergonomics") \ 3075 constraint(InitialHeapSizeConstraintFunc,AfterErgo) \ 3076 \ 3077 product(size_t, MaxHeapSize, ScaleForWordSize(96*M), \ 3078 "Maximum heap size (in bytes)") \ 3079 constraint(MaxHeapSizeConstraintFunc,AfterErgo) \ 3080 \ 3081 product(size_t, OldSize, ScaleForWordSize(4*M), \ 3082 "Initial tenured generation size (in bytes)") \ 3083 range(0, max_uintx) \ 3084 \ 3085 product(size_t, NewSize, ScaleForWordSize(1*M), \ 3086 "Initial new generation size (in bytes)") \ 3087 constraint(NewSizeConstraintFunc,AfterErgo) \ 3088 \ 3089 product(size_t, MaxNewSize, max_uintx, \ 3090 "Maximum new generation size (in bytes), max_uintx means set " \ 3091 "ergonomically") \ 3092 range(0, max_uintx) \ 3093 \ 3094 product_pd(size_t, HeapBaseMinAddress, \ 3095 "OS specific low limit for heap base address") \ 3096 constraint(HeapBaseMinAddressConstraintFunc,AfterErgo) \ 3097 \ 3098 product(size_t, PretenureSizeThreshold, 0, \ 3099 "Maximum size in bytes of objects allocated in DefNew " \ 3100 "generation; zero means no maximum") \ 3101 range(0, max_uintx) \ 3102 \ 3103 product(size_t, MinTLABSize, 2*K, \ 3104 "Minimum allowed TLAB size (in bytes)") \ 3105 range(1, max_uintx/2) \ 3106 constraint(MinTLABSizeConstraintFunc,AfterMemoryInit) \ 3107 \ 3108 product(size_t, TLABSize, 0, \ 3109 "Starting TLAB size (in bytes); zero 
means set ergonomically") \ 3110 constraint(TLABSizeConstraintFunc,AfterMemoryInit) \ 3111 \ 3112 product(size_t, YoungPLABSize, 4096, \ 3113 "Size of young gen promotion LAB's (in HeapWords)") \ 3114 constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \ 3115 \ 3116 product(size_t, OldPLABSize, 1024, \ 3117 "Size of old gen promotion LAB's (in HeapWords), or Number " \ 3118 "of blocks to attempt to claim when refilling CMS LAB's") \ 3119 constraint(OldPLABSizeConstraintFunc,AfterMemoryInit) \ 3120 \ 3121 product(uintx, TLABAllocationWeight, 35, \ 3122 "Allocation averaging weight") \ 3123 range(0, 100) \ 3124 \ 3125 /* Limit the lower bound of this flag to 1 as it is used */ \ 3126 /* in a division expression. */ \ 3127 product(uintx, TLABWasteTargetPercent, 1, \ 3128 "Percentage of Eden that can be wasted") \ 3129 range(1, 100) \ 3130 \ 3131 product(uintx, TLABRefillWasteFraction, 64, \ 3132 "Maximum TLAB waste at a refill (internal fragmentation)") \ 3133 range(1, max_juint) \ 3134 \ 3135 product(uintx, TLABWasteIncrement, 4, \ 3136 "Increment allowed waste at slow allocation") \ 3137 range(0, max_jint) \ 3138 constraint(TLABWasteIncrementConstraintFunc,AfterMemoryInit) \ 3139 \ 3140 product(uintx, SurvivorRatio, 8, \ 3141 "Ratio of eden/survivor space size") \ 3142 range(1, max_uintx-2) \ 3143 constraint(SurvivorRatioConstraintFunc,AfterMemoryInit) \ 3144 \ 3145 product(uintx, NewRatio, 2, \ 3146 "Ratio of old/new generation sizes") \ 3147 range(0, max_uintx-1) \ 3148 \ 3149 product_pd(size_t, NewSizeThreadIncrease, \ 3150 "Additional size added to desired new generation size per " \ 3151 "non-daemon thread (in bytes)") \ 3152 range(0, max_uintx) \ 3153 \ 3154 product_pd(size_t, MetaspaceSize, \ 3155 "Initial threshold (in bytes) at which a garbage collection " \ 3156 "is done to reduce Metaspace usage") \ 3157 constraint(MetaspaceSizeConstraintFunc,AfterErgo) \ 3158 \ 3159 product(size_t, MaxMetaspaceSize, max_uintx, \ 3160 "Maximum size of Metaspaces (in 
bytes)") \ 3161 constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo) \ 3162 \ 3163 product(size_t, CompressedClassSpaceSize, 1*G, \ 3164 "Maximum size of class area in Metaspace when compressed " \ 3165 "class pointers are used") \ 3166 range(1*M, 3*G) \ 3167 \ 3168 manageable(uintx, MinHeapFreeRatio, 40, \ 3169 "The minimum percentage of heap free after GC to avoid expansion."\ 3170 " For most GCs this applies to the old generation. In G1 and" \ 3171 " ParallelGC it applies to the whole heap.") \ 3172 range(0, 100) \ 3173 constraint(MinHeapFreeRatioConstraintFunc,AfterErgo) \ 3174 \ 3175 manageable(uintx, MaxHeapFreeRatio, 70, \ 3176 "The maximum percentage of heap free after GC to avoid shrinking."\ 3177 " For most GCs this applies to the old generation. In G1 and" \ 3178 " ParallelGC it applies to the whole heap.") \ 3179 range(0, 100) \ 3180 constraint(MaxHeapFreeRatioConstraintFunc,AfterErgo) \ 3181 \ 3182 product(bool, ShrinkHeapInSteps, true, \ 3183 "When disabled, informs the GC to shrink the java heap directly" \ 3184 " to the target size at the next full GC rather than requiring" \ 3185 " smaller steps during multiple full GCs.") \ 3186 \ 3187 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 3188 "Number of milliseconds per MB of free space in the heap") \ 3189 range(0, max_intx) \ 3190 constraint(SoftRefLRUPolicyMSPerMBConstraintFunc,AfterMemoryInit) \ 3191 \ 3192 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 3193 "The minimum change in heap space due to GC (in bytes)") \ 3194 range(0, max_uintx) \ 3195 \ 3196 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ 3197 "The minimum expansion of Metaspace (in bytes)") \ 3198 range(0, max_uintx) \ 3199 \ 3200 product(uintx, MaxMetaspaceFreeRatio, 70, \ 3201 "The maximum percentage of Metaspace free after GC to avoid " \ 3202 "shrinking") \ 3203 range(0, 100) \ 3204 constraint(MaxMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3205 \ 3206 product(uintx, MinMetaspaceFreeRatio, 40, \ 
3207 "The minimum percentage of Metaspace free after GC to avoid " \ 3208 "expansion") \ 3209 range(0, 99) \ 3210 constraint(MinMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3211 \ 3212 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ 3213 "The maximum expansion of Metaspace without full GC (in bytes)") \ 3214 range(0, max_uintx) \ 3215 \ 3216 product(uintx, QueuedAllocationWarningCount, 0, \ 3217 "Number of times an allocation that queues behind a GC " \ 3218 "will retry before printing a warning") \ 3219 range(0, max_uintx) \ 3220 \ 3221 diagnostic(uintx, VerifyGCStartAt, 0, \ 3222 "GC invoke count where +VerifyBefore/AfterGC kicks in") \ 3223 range(0, max_uintx) \ 3224 \ 3225 diagnostic(intx, VerifyGCLevel, 0, \ 3226 "Generation level at which to start +VerifyBefore/AfterGC") \ 3227 range(0, 1) \ 3228 \ 3229 product(uintx, MaxTenuringThreshold, 15, \ 3230 "Maximum value for tenuring threshold") \ 3231 range(0, markOopDesc::max_age + 1) \ 3232 constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \ 3233 \ 3234 product(uintx, InitialTenuringThreshold, 7, \ 3235 "Initial value for tenuring threshold") \ 3236 range(0, markOopDesc::max_age + 1) \ 3237 constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \ 3238 \ 3239 product(uintx, TargetSurvivorRatio, 50, \ 3240 "Desired percentage of survivor space used after scavenge") \ 3241 range(0, 100) \ 3242 \ 3243 product(uintx, MarkSweepDeadRatio, 5, \ 3244 "Percentage (0-100) of the old gen allowed as dead wood. " \ 3245 "Serial mark sweep treats this as both the minimum and maximum " \ 3246 "value. " \ 3247 "CMS uses this value only if it falls back to mark sweep. " \ 3248 "Par compact uses a variable scale based on the density of the " \ 3249 "generation and treats this as the maximum value when the heap " \ 3250 "is either completely full or completely empty. 
Par compact " \ 3251 "also has a smaller default value; see arguments.cpp.") \ 3252 range(0, 100) \ 3253 \ 3254 product(uint, MarkSweepAlwaysCompactCount, 4, \ 3255 "How often should we fully compact the heap (ignoring the dead " \ 3256 "space parameters)") \ 3257 range(1, max_juint) \ 3258 \ 3259 develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 3260 "Delay between expansion and allocation (in milliseconds)") \ 3261 \ 3262 develop(uintx, GCWorkerDelayMillis, 0, \ 3263 "Delay in scheduling GC workers (in milliseconds)") \ 3264 \ 3265 product(intx, DeferThrSuspendLoopCount, 4000, \ 3266 "(Unstable) Number of times to iterate in safepoint loop " \ 3267 "before blocking VM threads ") \ 3268 range(-1, max_jint-1) \ 3269 \ 3270 product(intx, DeferPollingPageLoopCount, -1, \ 3271 "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 3272 "before changing safepoint polling page to RO ") \ 3273 range(-1, max_jint-1) \ 3274 \ 3275 product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 3276 range(0, max_intx) \ 3277 \ 3278 product(bool, PSChunkLargeArrays, true, \ 3279 "Process large arrays in chunks") \ 3280 \ 3281 product(uintx, GCDrainStackTargetSize, 64, \ 3282 "Number of entries we will try to leave on the stack " \ 3283 "during parallel gc") \ 3284 range(0, max_juint) \ 3285 \ 3286 /* stack parameters */ \ 3287 product_pd(intx, StackYellowPages, \ 3288 "Number of yellow zone (recoverable overflows) pages of size " \ 3289 "4KB. If pages are bigger yellow zone is aligned up.") \ 3290 range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5)) \ 3291 \ 3292 product_pd(intx, StackRedPages, \ 3293 "Number of red zone (unrecoverable overflows) pages of size " \ 3294 "4KB. If pages are bigger red zone is aligned up.") \ 3295 range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \ 3296 \ 3297 product_pd(intx, StackReservedPages, \ 3298 "Number of reserved zone (reserved to annotated methods) pages" \ 3299 " of size 4KB. 
If pages are bigger reserved zone is aligned up.") \ 3300 range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\ 3301 \ 3302 product(bool, RestrictReservedStack, true, \ 3303 "Restrict @ReservedStackAccess to trusted classes") \ 3304 \ 3305 /* greater stack shadow pages can't generate instruction to bang stack */ \ 3306 product_pd(intx, StackShadowPages, \ 3307 "Number of shadow zone (for overflow checking) pages of size " \ 3308 "4KB. If pages are bigger shadow zone is aligned up. " \ 3309 "This should exceed the depth of the VM and native call stack.") \ 3310 range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30)) \ 3311 \ 3312 product_pd(intx, ThreadStackSize, \ 3313 "Thread Stack Size (in Kbytes)") \ 3314 range(0, (max_intx-os::vm_page_size())/(1 * K)) \ 3315 \ 3316 product_pd(intx, VMThreadStackSize, \ 3317 "Non-Java Thread Stack Size (in Kbytes)") \ 3318 range(0, max_intx/(1 * K)) \ 3319 \ 3320 product_pd(intx, CompilerThreadStackSize, \ 3321 "Compiler Thread Stack Size (in Kbytes)") \ 3322 range(0, max_intx/(1 * K)) \ 3323 \ 3324 develop_pd(size_t, JVMInvokeMethodSlack, \ 3325 "Stack space (bytes) required for JVM_InvokeMethod to complete") \ 3326 \ 3327 /* code cache parameters */ \ 3328 /* ppc64/tiered compilation has large code-entry alignment. 
*/ \ 3329 develop(uintx, CodeCacheSegmentSize, \ 3330 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)), \ 3331 "Code cache segment size (in bytes) - smallest unit of " \ 3332 "allocation") \ 3333 range(1, 1024) \ 3334 constraint(CodeCacheSegmentSizeConstraintFunc, AfterErgo) \ 3335 \ 3336 develop_pd(intx, CodeEntryAlignment, \ 3337 "Code entry alignment for generated code (in bytes)") \ 3338 constraint(CodeEntryAlignmentConstraintFunc, AfterErgo) \ 3339 \ 3340 product_pd(intx, OptoLoopAlignment, \ 3341 "Align inner loops to zero relative to this modulus") \ 3342 range(1, 16) \ 3343 constraint(OptoLoopAlignmentConstraintFunc, AfterErgo) \ 3344 \ 3345 product_pd(uintx, InitialCodeCacheSize, \ 3346 "Initial code cache size (in bytes)") \ 3347 range(0, max_uintx) \ 3348 \ 3349 develop_pd(uintx, CodeCacheMinimumUseSpace, \ 3350 "Minimum code cache size (in bytes) required to start VM.") \ 3351 range(0, max_uintx) \ 3352 \ 3353 product(bool, SegmentedCodeCache, false, \ 3354 "Use a segmented code cache") \ 3355 \ 3356 product_pd(uintx, ReservedCodeCacheSize, \ 3357 "Reserved code cache size (in bytes) - maximum code cache size") \ 3358 range(0, max_uintx) \ 3359 \ 3360 product_pd(uintx, NonProfiledCodeHeapSize, \ 3361 "Size of code heap with non-profiled methods (in bytes)") \ 3362 range(0, max_uintx) \ 3363 \ 3364 product_pd(uintx, ProfiledCodeHeapSize, \ 3365 "Size of code heap with profiled methods (in bytes)") \ 3366 range(0, max_uintx) \ 3367 \ 3368 product_pd(uintx, NonNMethodCodeHeapSize, \ 3369 "Size of code heap with non-nmethods (in bytes)") \ 3370 range(0, max_uintx) \ 3371 \ 3372 product_pd(uintx, CodeCacheExpansionSize, \ 3373 "Code cache expansion size (in bytes)") \ 3374 range(0, max_uintx) \ 3375 \ 3376 develop_pd(uintx, CodeCacheMinBlockLength, \ 3377 "Minimum number of segments in a code cache block") \ 3378 range(1, 100) \ 3379 \ 3380 notproduct(bool, ExitOnFullCodeCache, false, \ 3381 "Exit the VM if we fill the code cache") \ 3382 \ 3383 product(bool, 
UseCodeCacheFlushing, true, \ 3384 "Remove cold/old nmethods from the code cache") \ 3385 \ 3386 product(uintx, StartAggressiveSweepingAt, 10, \ 3387 "Start aggressive sweeping if X[%] of the code cache is free." \ 3388 "Segmented code cache: X[%] of the non-profiled heap." \ 3389 "Non-segmented code cache: X[%] of the total code cache") \ 3390 range(0, 100) \ 3391 \ 3392 /* interpreter debugging */ \ 3393 develop(intx, BinarySwitchThreshold, 5, \ 3394 "Minimal number of lookupswitch entries for rewriting to binary " \ 3395 "switch") \ 3396 \ 3397 develop(intx, StopInterpreterAt, 0, \ 3398 "Stop interpreter execution at specified bytecode number") \ 3399 \ 3400 develop(intx, TraceBytecodesAt, 0, \ 3401 "Trace bytecodes starting with specified bytecode number") \ 3402 \ 3403 /* compiler interface */ \ 3404 develop(intx, CIStart, 0, \ 3405 "The id of the first compilation to permit") \ 3406 \ 3407 develop(intx, CIStop, max_jint, \ 3408 "The id of the last compilation to permit") \ 3409 \ 3410 develop(intx, CIStartOSR, 0, \ 3411 "The id of the first osr compilation to permit " \ 3412 "(CICountOSR must be on)") \ 3413 \ 3414 develop(intx, CIStopOSR, max_jint, \ 3415 "The id of the last osr compilation to permit " \ 3416 "(CICountOSR must be on)") \ 3417 \ 3418 develop(intx, CIBreakAtOSR, -1, \ 3419 "The id of osr compilation to break at") \ 3420 \ 3421 develop(intx, CIBreakAt, -1, \ 3422 "The id of compilation to break at") \ 3423 \ 3424 product(ccstrlist, CompileOnly, "", \ 3425 "List of methods (pkg/class.name) to restrict compilation to") \ 3426 \ 3427 product(ccstr, CompileCommandFile, NULL, \ 3428 "Read compiler commands from this file [.hotspot_compiler]") \ 3429 \ 3430 diagnostic(ccstr, CompilerDirectivesFile, NULL, \ 3431 "Read compiler directives from this file") \ 3432 \ 3433 product(ccstrlist, CompileCommand, "", \ 3434 "Prepend to .hotspot_compiler; e.g. 
log,java/lang/String.<init>") \ 3435 \ 3436 develop(bool, ReplayCompiles, false, \ 3437 "Enable replay of compilations from ReplayDataFile") \ 3438 \ 3439 product(ccstr, ReplayDataFile, NULL, \ 3440 "File containing compilation replay information" \ 3441 "[default: ./replay_pid%p.log] (%p replaced with pid)") \ 3442 \ 3443 product(ccstr, InlineDataFile, NULL, \ 3444 "File containing inlining replay information" \ 3445 "[default: ./inline_pid%p.log] (%p replaced with pid)") \ 3446 \ 3447 develop(intx, ReplaySuppressInitializers, 2, \ 3448 "Control handling of class initialization during replay: " \ 3449 "0 - don't do anything special; " \ 3450 "1 - treat all class initializers as empty; " \ 3451 "2 - treat class initializers for application classes as empty; " \ 3452 "3 - allow all class initializers to run during bootstrap but " \ 3453 " pretend they are empty after starting replay") \ 3454 range(0, 3) \ 3455 \ 3456 develop(bool, ReplayIgnoreInitErrors, false, \ 3457 "Ignore exceptions thrown during initialization for replay") \ 3458 \ 3459 product(bool, DumpReplayDataOnError, true, \ 3460 "Record replay data for crashing compiler threads") \ 3461 \ 3462 product(bool, CICompilerCountPerCPU, false, \ 3463 "1 compiler thread for log(N CPUs)") \ 3464 \ 3465 develop(intx, CIFireOOMAt, -1, \ 3466 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ 3467 "(non-negative value throws OOM after this many CI accesses " \ 3468 "in each compile)") \ 3469 notproduct(intx, CICrashAt, -1, \ 3470 "id of compilation to trigger assert in compiler thread for " \ 3471 "the purpose of testing, e.g. generation of replay data") \ 3472 notproduct(bool, CIObjectFactoryVerify, false, \ 3473 "enable potentially expensive verification in ciObjectFactory") \ 3474 \ 3475 /* Priorities */ \ 3476 product_pd(bool, UseThreadPriorities, "Use native thread priorities") \ 3477 \ 3478 product(intx, ThreadPriorityPolicy, 0, \ 3479 "0 : Normal. 
"\ 3480 " VM chooses priorities that are appropriate for normal "\ 3481 " applications. On Solaris NORM_PRIORITY and above are mapped "\ 3482 " to normal native priority. Java priorities below " \ 3483 " NORM_PRIORITY map to lower native priority values. On "\ 3484 " Windows applications are allowed to use higher native "\ 3485 " priorities. However, with ThreadPriorityPolicy=0, VM will "\ 3486 " not use the highest possible native priority, "\ 3487 " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ 3488 " system threads. On Linux thread priorities are ignored "\ 3489 " because the OS does not support static priority in "\ 3490 " SCHED_OTHER scheduling class which is the only choice for "\ 3491 " non-root, non-realtime applications. "\ 3492 "1 : Aggressive. "\ 3493 " Java thread priorities map over to the entire range of "\ 3494 " native thread priorities. Higher Java thread priorities map "\ 3495 " to higher native thread priorities. This policy should be "\ 3496 " used with care, as sometimes it can cause performance "\ 3497 " degradation in the application and/or the entire system. 
On "\ 3498 " Linux this policy requires root privilege.") \ 3499 range(0, 1) \ 3500 \ 3501 product(bool, ThreadPriorityVerbose, false, \ 3502 "Print priority changes") \ 3503 \ 3504 product(intx, CompilerThreadPriority, -1, \ 3505 "The native priority at which compiler threads should run " \ 3506 "(-1 means no change)") \ 3507 range(min_jint, max_jint) \ 3508 constraint(CompilerThreadPriorityConstraintFunc, AfterErgo) \ 3509 \ 3510 product(intx, VMThreadPriority, -1, \ 3511 "The native priority at which the VM thread should run " \ 3512 "(-1 means no change)") \ 3513 range(-1, 127) \ 3514 \ 3515 product(bool, CompilerThreadHintNoPreempt, true, \ 3516 "(Solaris only) Give compiler threads an extra quanta") \ 3517 \ 3518 product(bool, VMThreadHintNoPreempt, false, \ 3519 "(Solaris only) Give VM thread an extra quanta") \ 3520 \ 3521 product(intx, JavaPriority1_To_OSPriority, -1, \ 3522 "Map Java priorities to OS priorities") \ 3523 range(-1, 127) \ 3524 \ 3525 product(intx, JavaPriority2_To_OSPriority, -1, \ 3526 "Map Java priorities to OS priorities") \ 3527 range(-1, 127) \ 3528 \ 3529 product(intx, JavaPriority3_To_OSPriority, -1, \ 3530 "Map Java priorities to OS priorities") \ 3531 range(-1, 127) \ 3532 \ 3533 product(intx, JavaPriority4_To_OSPriority, -1, \ 3534 "Map Java priorities to OS priorities") \ 3535 range(-1, 127) \ 3536 \ 3537 product(intx, JavaPriority5_To_OSPriority, -1, \ 3538 "Map Java priorities to OS priorities") \ 3539 range(-1, 127) \ 3540 \ 3541 product(intx, JavaPriority6_To_OSPriority, -1, \ 3542 "Map Java priorities to OS priorities") \ 3543 range(-1, 127) \ 3544 \ 3545 product(intx, JavaPriority7_To_OSPriority, -1, \ 3546 "Map Java priorities to OS priorities") \ 3547 range(-1, 127) \ 3548 \ 3549 product(intx, JavaPriority8_To_OSPriority, -1, \ 3550 "Map Java priorities to OS priorities") \ 3551 range(-1, 127) \ 3552 \ 3553 product(intx, JavaPriority9_To_OSPriority, -1, \ 3554 "Map Java priorities to OS priorities") \ 3555 range(-1, 127) 
\ 3556 \ 3557 product(intx, JavaPriority10_To_OSPriority,-1, \ 3558 "Map Java priorities to OS priorities") \ 3559 range(-1, 127) \ 3560 \ 3561 experimental(bool, UseCriticalJavaThreadPriority, false, \ 3562 "Java thread priority 10 maps to critical scheduling priority") \ 3563 \ 3564 experimental(bool, UseCriticalCompilerThreadPriority, false, \ 3565 "Compiler thread(s) run at critical scheduling priority") \ 3566 \ 3567 experimental(bool, UseCriticalCMSThreadPriority, false, \ 3568 "ConcurrentMarkSweep thread runs at critical scheduling priority")\ 3569 \ 3570 /* compiler debugging */ \ 3571 notproduct(intx, CompileTheWorldStartAt, 1, \ 3572 "First class to consider when using +CompileTheWorld") \ 3573 \ 3574 notproduct(intx, CompileTheWorldStopAt, max_jint, \ 3575 "Last class to consider when using +CompileTheWorld") \ 3576 \ 3577 develop(intx, NewCodeParameter, 0, \ 3578 "Testing Only: Create a dedicated integer parameter before " \ 3579 "putback") \ 3580 \ 3581 /* new oopmap storage allocation */ \ 3582 develop(intx, MinOopMapAllocation, 8, \ 3583 "Minimum number of OopMap entries in an OopMapSet") \ 3584 \ 3585 /* Background Compilation */ \ 3586 develop(intx, LongCompileThreshold, 50, \ 3587 "Used with +TraceLongCompiles") \ 3588 \ 3589 /* recompilation */ \ 3590 product_pd(intx, CompileThreshold, \ 3591 "number of interpreted method invocations before (re-)compiling") \ 3592 constraint(CompileThresholdConstraintFunc, AfterErgo) \ 3593 \ 3594 product(double, CompileThresholdScaling, 1.0, \ 3595 "Factor to control when first compilation happens " \ 3596 "(both with and without tiered compilation): " \ 3597 "values greater than 1.0 delay counter overflow, " \ 3598 "values between 0 and 1.0 rush counter overflow, " \ 3599 "value of 1.0 leaves compilation thresholds unchanged " \ 3600 "value of 0.0 is equivalent to -Xint. " \ 3601 "" \ 3602 "Flag can be set as per-method option. 
" \ 3603 "If a value is specified for a method, compilation thresholds " \ 3604 "for that method are scaled by both the value of the global flag "\ 3605 "and the value of the per-method flag.") \ 3606 range(0.0, DBL_MAX) \ 3607 \ 3608 product(intx, Tier0InvokeNotifyFreqLog, 7, \ 3609 "Interpreter (tier 0) invocation notification frequency") \ 3610 range(0, 30) \ 3611 \ 3612 product(intx, Tier2InvokeNotifyFreqLog, 11, \ 3613 "C1 without MDO (tier 2) invocation notification frequency") \ 3614 range(0, 30) \ 3615 \ 3616 product(intx, Tier3InvokeNotifyFreqLog, 10, \ 3617 "C1 with MDO profiling (tier 3) invocation notification " \ 3618 "frequency") \ 3619 range(0, 30) \ 3620 \ 3621 product(intx, Tier23InlineeNotifyFreqLog, 20, \ 3622 "Inlinee invocation (tiers 2 and 3) notification frequency") \ 3623 range(0, 30) \ 3624 \ 3625 product(intx, Tier0BackedgeNotifyFreqLog, 10, \ 3626 "Interpreter (tier 0) invocation notification frequency") \ 3627 range(0, 30) \ 3628 \ 3629 product(intx, Tier2BackedgeNotifyFreqLog, 14, \ 3630 "C1 without MDO (tier 2) invocation notification frequency") \ 3631 range(0, 30) \ 3632 \ 3633 product(intx, Tier3BackedgeNotifyFreqLog, 13, \ 3634 "C1 with MDO profiling (tier 3) invocation notification " \ 3635 "frequency") \ 3636 range(0, 30) \ 3637 \ 3638 product(intx, Tier2CompileThreshold, 0, \ 3639 "threshold at which tier 2 compilation is invoked") \ 3640 range(0, max_jint) \ 3641 \ 3642 product(intx, Tier2BackEdgeThreshold, 0, \ 3643 "Back edge threshold at which tier 2 compilation is invoked") \ 3644 range(0, max_jint) \ 3645 \ 3646 product(intx, Tier3InvocationThreshold, 200, \ 3647 "Compile if number of method invocations crosses this " \ 3648 "threshold") \ 3649 range(0, max_jint) \ 3650 \ 3651 product(intx, Tier3MinInvocationThreshold, 100, \ 3652 "Minimum invocation to compile at tier 3") \ 3653 range(0, max_jint) \ 3654 \ 3655 product(intx, Tier3CompileThreshold, 2000, \ 3656 "Threshold at which tier 3 compilation is invoked (invocation 
" \ 3657 "minimum must be satisfied)") \ 3658 range(0, max_jint) \ 3659 \ 3660 product(intx, Tier3BackEdgeThreshold, 60000, \ 3661 "Back edge threshold at which tier 3 OSR compilation is invoked") \ 3662 range(0, max_jint) \ 3663 \ 3664 product(intx, Tier4InvocationThreshold, 5000, \ 3665 "Compile if number of method invocations crosses this " \ 3666 "threshold") \ 3667 range(0, max_jint) \ 3668 \ 3669 product(intx, Tier4MinInvocationThreshold, 600, \ 3670 "Minimum invocation to compile at tier 4") \ 3671 range(0, max_jint) \ 3672 \ 3673 product(intx, Tier4CompileThreshold, 15000, \ 3674 "Threshold at which tier 4 compilation is invoked (invocation " \ 3675 "minimum must be satisfied") \ 3676 range(0, max_jint) \ 3677 \ 3678 product(intx, Tier4BackEdgeThreshold, 40000, \ 3679 "Back edge threshold at which tier 4 OSR compilation is invoked") \ 3680 range(0, max_jint) \ 3681 \ 3682 product(intx, Tier3DelayOn, 5, \ 3683 "If C2 queue size grows over this amount per compiler thread " \ 3684 "stop compiling at tier 3 and start compiling at tier 2") \ 3685 range(0, max_jint) \ 3686 \ 3687 product(intx, Tier3DelayOff, 2, \ 3688 "If C2 queue size is less than this amount per compiler thread " \ 3689 "allow methods compiled at tier 2 transition to tier 3") \ 3690 range(0, max_jint) \ 3691 \ 3692 product(intx, Tier3LoadFeedback, 5, \ 3693 "Tier 3 thresholds will increase twofold when C1 queue size " \ 3694 "reaches this amount per compiler thread") \ 3695 range(0, max_jint) \ 3696 \ 3697 product(intx, Tier4LoadFeedback, 3, \ 3698 "Tier 4 thresholds will increase twofold when C2 queue size " \ 3699 "reaches this amount per compiler thread") \ 3700 range(0, max_jint) \ 3701 \ 3702 product(intx, TieredCompileTaskTimeout, 50, \ 3703 "Kill compile task if method was not used within " \ 3704 "given timeout in milliseconds") \ 3705 range(0, max_intx) \ 3706 \ 3707 product(intx, TieredStopAtLevel, 4, \ 3708 "Stop at given compilation level") \ 3709 range(0, 4) \ 3710 \ 3711 
product(intx, Tier0ProfilingStartPercentage, 200, \ 3712 "Start profiling in interpreter if the counters exceed tier 3 " \ 3713 "thresholds by the specified percentage") \ 3714 range(0, max_jint) \ 3715 \ 3716 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ 3717 "Increase the compile threshold for C1 compilation if the code " \ 3718 "cache is filled by the specified percentage") \ 3719 range(0, 99) \ 3720 \ 3721 product(intx, TieredRateUpdateMinTime, 1, \ 3722 "Minimum rate sampling interval (in milliseconds)") \ 3723 range(0, max_intx) \ 3724 \ 3725 product(intx, TieredRateUpdateMaxTime, 25, \ 3726 "Maximum rate sampling interval (in milliseconds)") \ 3727 range(0, max_intx) \ 3728 \ 3729 product_pd(bool, TieredCompilation, \ 3730 "Enable tiered compilation") \ 3731 \ 3732 product(bool, PrintTieredEvents, false, \ 3733 "Print tiered events notifications") \ 3734 \ 3735 product_pd(intx, OnStackReplacePercentage, \ 3736 "NON_TIERED number of method invocations/branches (expressed as " \ 3737 "% of CompileThreshold) before (re-)compiling OSR code") \ 3738 constraint(OnStackReplacePercentageConstraintFunc, AfterErgo) \ 3739 \ 3740 product(intx, InterpreterProfilePercentage, 33, \ 3741 "NON_TIERED number of method invocations/branches (expressed as " \ 3742 "% of CompileThreshold) before profiling in the interpreter") \ 3743 range(0, 100) \ 3744 \ 3745 develop(intx, MaxRecompilationSearchLength, 10, \ 3746 "The maximum number of frames to inspect when searching for " \ 3747 "recompilee") \ 3748 \ 3749 develop(intx, MaxInterpretedSearchLength, 3, \ 3750 "The maximum number of interpreted frames to skip when searching "\ 3751 "for recompilee") \ 3752 \ 3753 develop(intx, DesiredMethodLimit, 8000, \ 3754 "The desired maximum method size (in bytecodes) after inlining") \ 3755 \ 3756 develop(intx, HugeMethodLimit, 8000, \ 3757 "Don't compile methods larger than this if " \ 3758 "+DontCompileHugeMethods") \ 3759 \ 3760 /* New JDK 1.4 reflection implementation */ \ 
3761 \ 3762 develop(intx, FastSuperclassLimit, 8, \ 3763 "Depth of hardwired instanceof accelerator array") \ 3764 \ 3765 /* Properties for Java libraries */ \ 3766 \ 3767 product(uint64_t, MaxDirectMemorySize, 0, \ 3768 "Maximum total size of NIO direct-buffer allocations") \ 3769 range(0, max_jlong) \ 3770 \ 3771 /* Flags used for temporary code during development */ \ 3772 \ 3773 diagnostic(bool, UseNewCode, false, \ 3774 "Testing Only: Use the new version while testing") \ 3775 \ 3776 diagnostic(bool, UseNewCode2, false, \ 3777 "Testing Only: Use the new version while testing") \ 3778 \ 3779 diagnostic(bool, UseNewCode3, false, \ 3780 "Testing Only: Use the new version while testing") \ 3781 \ 3782 /* flags for performance data collection */ \ 3783 \ 3784 product(bool, UsePerfData, true, \ 3785 "Flag to disable jvmstat instrumentation for performance testing "\ 3786 "and problem isolation purposes") \ 3787 \ 3788 product(bool, PerfDataSaveToFile, false, \ 3789 "Save PerfData memory to hsperfdata_<pid> file on exit") \ 3790 \ 3791 product(ccstr, PerfDataSaveFile, NULL, \ 3792 "Save PerfData memory to the specified absolute pathname. " \ 3793 "The string %p in the file name (if present) " \ 3794 "will be replaced by pid") \ 3795 \ 3796 product(intx, PerfDataSamplingInterval, 50, \ 3797 "Data sampling interval (in milliseconds)") \ 3798 range(PeriodicTask::min_interval, max_jint) \ 3799 constraint(PerfDataSamplingIntervalFunc, AfterErgo) \ 3800 \ 3801 develop(bool, PerfTraceDataCreation, false, \ 3802 "Trace creation of Performance Data Entries") \ 3803 \ 3804 develop(bool, PerfTraceMemOps, false, \ 3805 "Trace PerfMemory create/attach/detach calls") \ 3806 \ 3807 product(bool, PerfDisableSharedMem, false, \ 3808 "Store performance data in standard memory") \ 3809 \ 3810 product(intx, PerfDataMemorySize, 32*K, \ 3811 "Size of performance data memory region. 
Will be rounded " \ 3812 "up to a multiple of the native os page size.") \ 3813 range(128, 32*64*K) \ 3814 \ 3815 product(intx, PerfMaxStringConstLength, 1024, \ 3816 "Maximum PerfStringConstant string length before truncation") \ 3817 range(32, 32*K) \ 3818 \ 3819 product(bool, PerfAllowAtExitRegistration, false, \ 3820 "Allow registration of atexit() methods") \ 3821 \ 3822 product(bool, PerfBypassFileSystemCheck, false, \ 3823 "Bypass Win32 file system criteria checks (Windows Only)") \ 3824 \ 3825 product(intx, UnguardOnExecutionViolation, 0, \ 3826 "Unguard page and retry on no-execute fault (Win32 only) " \ 3827 "0=off, 1=conservative, 2=aggressive") \ 3828 range(0, 2) \ 3829 \ 3830 /* Serviceability Support */ \ 3831 \ 3832 product(bool, ManagementServer, false, \ 3833 "Create JMX Management Server") \ 3834 \ 3835 product(bool, DisableAttachMechanism, false, \ 3836 "Disable mechanism that allows tools to attach to this VM") \ 3837 \ 3838 product(bool, StartAttachListener, false, \ 3839 "Always start Attach Listener at VM startup") \ 3840 \ 3841 manageable(bool, PrintConcurrentLocks, false, \ 3842 "Print java.util.concurrent locks in thread dump") \ 3843 \ 3844 product(bool, TransmitErrorReport, false, \ 3845 "Enable error report transmission on erroneous termination") \ 3846 \ 3847 product(ccstr, ErrorReportServer, NULL, \ 3848 "Override built-in error report server address") \ 3849 \ 3850 /* Shared spaces */ \ 3851 \ 3852 product(bool, UseSharedSpaces, true, \ 3853 "Use shared spaces for metadata") \ 3854 \ 3855 product(bool, VerifySharedSpaces, false, \ 3856 "Verify shared spaces (false for default archive, true for " \ 3857 "archive specified by -XX:SharedArchiveFile)") \ 3858 \ 3859 product(bool, RequireSharedSpaces, false, \ 3860 "Require shared spaces for metadata") \ 3861 \ 3862 product(bool, DumpSharedSpaces, false, \ 3863 "Special mode: JVM reads a class list, loads classes, builds " \ 3864 "shared spaces, and dumps the shared spaces to a file to be 
" \ 3865 "used in future JVM runs") \ 3866 \ 3867 product(bool, PrintSharedSpaces, false, \ 3868 "Print usage of shared spaces") \ 3869 \ 3870 product(bool, PrintSharedArchiveAndExit, false, \ 3871 "Print shared archive file contents") \ 3872 \ 3873 product(bool, PrintSharedDictionary, false, \ 3874 "If PrintSharedArchiveAndExit is true, also print the shared " \ 3875 "dictionary") \ 3876 \ 3877 product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE, \ 3878 "Size of read-write space for metadata (in bytes)") \ 3879 range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE) \ 3880 constraint(SharedReadWriteSizeConstraintFunc,AfterErgo) \ 3881 \ 3882 product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE, \ 3883 "Size of read-only space for metadata (in bytes)") \ 3884 range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE) \ 3885 constraint(SharedReadOnlySizeConstraintFunc,AfterErgo) \ 3886 \ 3887 product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE, \ 3888 "Size of the shared miscellaneous data area (in bytes)") \ 3889 range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE) \ 3890 constraint(SharedMiscDataSizeConstraintFunc,AfterErgo) \ 3891 \ 3892 product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE, \ 3893 "Size of the shared miscellaneous code area (in bytes)") \ 3894 range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE) \ 3895 constraint(SharedMiscCodeSizeConstraintFunc,AfterErgo) \ 3896 \ 3897 product(size_t, SharedBaseAddress, LP64_ONLY(32*G) \ 3898 NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \ 3899 "Address to allocate shared memory region for class data") \ 3900 range(0, SIZE_MAX) \ 3901 \ 3902 product(uintx, SharedSymbolTableBucketSize, 4, \ 3903 "Average number of symbols per bucket in shared table") \ 3904 range(2, 246) \ 3905 \ 3906 diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false, \ 3907 "Do not quit -Xshare:dump even if we encounter unverifiable " \ 3908 "classes. 
Just exclude them from the shared dictionary.") \ 3909 \ 3910 diagnostic(bool, PrintMethodHandleStubs, false, \ 3911 "Print generated stub code for method handles") \ 3912 \ 3913 develop(bool, TraceMethodHandles, false, \ 3914 "trace internal method handle operations") \ 3915 \ 3916 diagnostic(bool, VerifyMethodHandles, trueInDebug, \ 3917 "perform extra checks when constructing method handles") \ 3918 \ 3919 diagnostic(bool, ShowHiddenFrames, false, \ 3920 "show method handle implementation frames (usually hidden)") \ 3921 \ 3922 experimental(bool, TrustFinalNonStaticFields, false, \ 3923 "trust final non-static declarations for constant folding") \ 3924 \ 3925 diagnostic(bool, FoldStableValues, true, \ 3926 "Optimize loads from stable fields (marked w/ @Stable)") \ 3927 \ 3928 develop(bool, TraceInvokeDynamic, false, \ 3929 "trace internal invoke dynamic operations") \ 3930 \ 3931 diagnostic(bool, PauseAtStartup, false, \ 3932 "Causes the VM to pause at startup time and wait for the pause " \ 3933 "file to be removed (default: ./vm.paused.<pid>)") \ 3934 \ 3935 diagnostic(ccstr, PauseAtStartupFile, NULL, \ 3936 "The file to create and for whose removal to await when pausing " \ 3937 "at startup. 
(default: ./vm.paused.<pid>)") \ 3938 \ 3939 diagnostic(bool, PauseAtExit, false, \ 3940 "Pause and wait for keypress on exit if a debugger is attached") \ 3941 \ 3942 product(bool, ExtendedDTraceProbes, false, \ 3943 "Enable performance-impacting dtrace probes") \ 3944 \ 3945 product(bool, DTraceMethodProbes, false, \ 3946 "Enable dtrace probes for method-entry and method-exit") \ 3947 \ 3948 product(bool, DTraceAllocProbes, false, \ 3949 "Enable dtrace probes for object allocation") \ 3950 \ 3951 product(bool, DTraceMonitorProbes, false, \ 3952 "Enable dtrace probes for monitor events") \ 3953 \ 3954 product(bool, RelaxAccessControlCheck, false, \ 3955 "Relax the access control checks in the verifier") \ 3956 \ 3957 product(uintx, StringTableSize, defaultStringTableSize, \ 3958 "Number of buckets in the interned String table") \ 3959 range(minimumStringTableSize, 111*defaultStringTableSize) \ 3960 \ 3961 experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ 3962 "Number of buckets in the JVM internal Symbol table") \ 3963 range(minimumSymbolTableSize, 111*defaultSymbolTableSize) \ 3964 \ 3965 product(bool, UseStringDeduplication, false, \ 3966 "Use string deduplication") \ 3967 \ 3968 product(uintx, StringDeduplicationAgeThreshold, 3, \ 3969 "A string must reach this age (or be promoted to an old region) " \ 3970 "to be considered for deduplication") \ 3971 range(1, markOopDesc::max_age) \ 3972 \ 3973 diagnostic(bool, StringDeduplicationResizeALot, false, \ 3974 "Force table resize every time the table is scanned") \ 3975 \ 3976 diagnostic(bool, StringDeduplicationRehashALot, false, \ 3977 "Force table rehash every time the table is scanned") \ 3978 \ 3979 diagnostic(bool, WhiteBoxAPI, false, \ 3980 "Enable internal testing APIs") \ 3981 \ 3982 experimental(intx, SurvivorAlignmentInBytes, 0, \ 3983 "Default survivor space alignment in bytes") \ 3984 constraint(SurvivorAlignmentInBytesConstraintFunc,AfterErgo) \ 3985 \ 3986 product(bool , 
AllowNonVirtualCalls, false, \ 3987 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ 3988 \ 3989 product(ccstr, DumpLoadedClassList, NULL, \ 3990 "Dump the names all loaded classes, that could be stored into " \ 3991 "the CDS archive, in the specified file") \ 3992 \ 3993 product(ccstr, SharedClassListFile, NULL, \ 3994 "Override the default CDS class list") \ 3995 \ 3996 diagnostic(ccstr, SharedArchiveFile, NULL, \ 3997 "Override the default location of the CDS archive file") \ 3998 \ 3999 product(ccstr, ExtraSharedClassListFile, NULL, \ 4000 "Extra classlist for building the CDS archive file") \ 4001 \ 4002 experimental(size_t, ArrayAllocatorMallocLimit, \ 4003 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \ 4004 "Allocation less than this value will be allocated " \ 4005 "using malloc. Larger allocations will use mmap.") \ 4006 \ 4007 experimental(bool, AlwaysAtomicAccesses, false, \ 4008 "Accesses to all variables should always be atomic") \ 4009 \ 4010 product(bool, EnableTracing, false, \ 4011 "Enable event-based tracing") \ 4012 \ 4013 product(bool, UseLockedTracing, false, \ 4014 "Use locked-tracing when doing event-based tracing") \ 4015 \ 4016 diagnostic(bool, UseUnalignedAccesses, false, \ 4017 "Use unaligned memory accesses in Unsafe") \ 4018 \ 4019 product_pd(bool, PreserveFramePointer, \ 4020 "Use the FP register for holding the frame pointer " \ 4021 "and not as a general purpose register.") \ 4022 \ 4023 diagnostic(bool, CheckIntrinsics, true, \ 4024 "When a class C is loaded, check that " \ 4025 "(1) all intrinsics defined by the VM for class C are present "\ 4026 "in the loaded class file and are marked with the " \ 4027 "@HotSpotIntrinsicCandidate annotation, that " \ 4028 "(2) there is an intrinsic registered for all loaded methods " \ 4029 "that are annotated with the @HotSpotIntrinsicCandidate " \ 4030 "annotation, and that " \ 4031 "(3) no orphan methods exist for class C (i.e., methods for " \ 4032 "which the VM declares an 
intrinsic but that are not declared "\ 4033 "in the loaded class C. " \ 4034 "Check (3) is available only in debug builds.") \ 4035 \ 4036 diagnostic_pd(intx, InitArrayShortSize, \ 4037 "Threshold small size (in bytes) for clearing arrays. " \ 4038 "Anything this size or smaller may get converted to discrete " \ 4039 "scalar stores.") \ 4040 range(0, max_intx) \ 4041 constraint(InitArrayShortSizeConstraintFunc, AfterErgo) \ 4042 \ 4043 diagnostic(bool, CompilerDirectivesIgnoreCompileCommands, false, \ 4044 "Disable backwards compatibility for compile commands.") \ 4045 \ 4046 diagnostic(bool, CompilerDirectivesPrint, false, \ 4047 "Print compiler directives on installation.") \ 4048 diagnostic(int, CompilerDirectivesLimit, 50, \ 4049 "Limit on number of compiler directives.") 4050 4051 4052/* 4053 * Macros for factoring of globals 4054 */ 4055 4056// Interface macros 4057#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4058#define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name; 4059#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name; 4060#define DECLARE_PD_DIAGNOSTIC_FLAG(type, name, doc) extern "C" type name; 4061#define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name; 4062#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name; 4063#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name; 4064#ifdef PRODUCT 4065#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) const type name = value; 4066#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) const type name = pd_##name; 4067#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) const type name = value; 4068#else 4069#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name; 4070#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name; 4071#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4072#endif // PRODUCT 4073// 
Special LP64 flags, product only needed for now.
#ifdef _LP64
#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
#else
#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
#endif // _LP64

// Implementation macros: these expand each flag-table entry into the
// definition of the flag variable itself.  pd_ (platform-dependent) flags
// take their initial value from the pd_<name> constant supplied by the
// platform globals headers included at the top of this file.
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc)      type name = value;
#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc)          type name = pd_##name;
#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc)   type name = value;
#define MATERIALIZE_PD_DIAGNOSTIC_FLAG(type, name, doc)       type name = pd_##name;
#define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc)   type name = value;
#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc)   type name = value;
#ifdef PRODUCT
// In PRODUCT builds develop/notproduct flags get no backing storage; the
// corresponding DECLARE_ macros above turn them into compile-time constants.
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)
#else
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)    type name = value;
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)        type name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)   type name = value;
#endif // PRODUCT
#ifdef _LP64
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
#else
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
#endif // _LP64

// Only materialize src code for range checking when required, ignore otherwise
#define IGNORE_RANGE(a, b)
// Only materialize src code for constraint checking when required, ignore otherwise
#define IGNORE_CONSTRAINT(func,type)

#define IGNORE_WRITEABLE(type)

// Declare every flag in the runtime, OS and architecture flag tables using
// the DECLARE_ macros above; range/constraint/writeable clauses are ignored
// here (they are only materialized where checking is required).
RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \
              DECLARE_PD_DEVELOPER_FLAG, \
              DECLARE_PRODUCT_FLAG, \
              DECLARE_PD_PRODUCT_FLAG, \
              DECLARE_DIAGNOSTIC_FLAG, \
              DECLARE_PD_DIAGNOSTIC_FLAG, \
              DECLARE_EXPERIMENTAL_FLAG, \
              DECLARE_NOTPRODUCT_FLAG, \
              DECLARE_MANAGEABLE_FLAG, \
              DECLARE_PRODUCT_RW_FLAG, \
              DECLARE_LP64_PRODUCT_FLAG, \
              IGNORE_RANGE, \
              IGNORE_CONSTRAINT, \
              IGNORE_WRITEABLE)

RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \
                 DECLARE_PD_DEVELOPER_FLAG, \
                 DECLARE_PRODUCT_FLAG, \
                 DECLARE_PD_PRODUCT_FLAG, \
                 DECLARE_DIAGNOSTIC_FLAG, \
                 DECLARE_PD_DIAGNOSTIC_FLAG, \
                 DECLARE_NOTPRODUCT_FLAG, \
                 IGNORE_RANGE, \
                 IGNORE_CONSTRAINT, \
                 IGNORE_WRITEABLE)

ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, \
           DECLARE_PRODUCT_FLAG, \
           DECLARE_DIAGNOSTIC_FLAG, \
           DECLARE_EXPERIMENTAL_FLAG, \
           DECLARE_NOTPRODUCT_FLAG, \
           IGNORE_RANGE, \
           IGNORE_CONSTRAINT, \
           IGNORE_WRITEABLE)

// Extensions

#include "runtime/globals_ext.hpp"

#endif // SHARE_VM_RUNTIME_GLOBALS_HPP