globals.hpp revision 9568:6fe3f29f5d00
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
2212657Skvn * 2312657Skvn */ 2412657Skvn 2513264Siveresov#ifndef SHARE_VM_RUNTIME_GLOBALS_HPP 2612657Skvn#define SHARE_VM_RUNTIME_GLOBALS_HPP 2712657Skvn 2812657Skvn#include <float.h> 2912657Skvn#include "utilities/debug.hpp" 3012657Skvn#include <float.h> // for DBL_MAX 3112657Skvn 3212657Skvn// use this for flags that are true per default in the tiered build 3312657Skvn// but false in non-tiered builds, and vice versa 3412657Skvn#ifdef TIERED 3512657Skvn#define trueInTiered true 3612657Skvn#define falseInTiered false 3712657Skvn#else 3812657Skvn#define trueInTiered false 3912657Skvn#define falseInTiered true 4012657Skvn#endif 4112657Skvn 4212657Skvn#ifdef TARGET_ARCH_x86 4312657Skvn# include "globals_x86.hpp" 4412657Skvn#endif 4512657Skvn#ifdef TARGET_ARCH_sparc 4612657Skvn# include "globals_sparc.hpp" 4712657Skvn#endif 4812657Skvn#ifdef TARGET_ARCH_zero 4912657Skvn# include "globals_zero.hpp" 5012657Skvn#endif 5112657Skvn#ifdef TARGET_ARCH_arm 5212657Skvn# include "globals_arm.hpp" 5312657Skvn#endif 5412657Skvn#ifdef TARGET_ARCH_ppc 5512657Skvn# include "globals_ppc.hpp" 5612657Skvn#endif 5712657Skvn#ifdef TARGET_ARCH_aarch64 5812657Skvn# include "globals_aarch64.hpp" 5912657Skvn#endif 6012657Skvn#ifdef TARGET_OS_FAMILY_linux 6112657Skvn# include "globals_linux.hpp" 6212657Skvn#endif 6312657Skvn#ifdef TARGET_OS_FAMILY_solaris 6412657Skvn# include "globals_solaris.hpp" 6512657Skvn#endif 6612657Skvn#ifdef TARGET_OS_FAMILY_windows 6712657Skvn# include "globals_windows.hpp" 6812657Skvn#endif 6912657Skvn#ifdef TARGET_OS_FAMILY_aix 7012657Skvn# include "globals_aix.hpp" 7112657Skvn#endif 7212657Skvn#ifdef TARGET_OS_FAMILY_bsd 7312657Skvn# include "globals_bsd.hpp" 7412657Skvn#endif 7512657Skvn#ifdef TARGET_OS_ARCH_linux_x86 7612657Skvn# include "globals_linux_x86.hpp" 7712657Skvn#endif 7812657Skvn#ifdef TARGET_OS_ARCH_linux_sparc 7912657Skvn# include "globals_linux_sparc.hpp" 8012657Skvn#endif 8112657Skvn#ifdef TARGET_OS_ARCH_linux_zero 8212657Skvn# include 
"globals_linux_zero.hpp" 8312657Skvn#endif 8412657Skvn#ifdef TARGET_OS_ARCH_solaris_x86 8512657Skvn# include "globals_solaris_x86.hpp" 8612657Skvn#endif 8712657Skvn#ifdef TARGET_OS_ARCH_solaris_sparc 8812657Skvn# include "globals_solaris_sparc.hpp" 8912657Skvn#endif 9012657Skvn#ifdef TARGET_OS_ARCH_windows_x86 9112657Skvn# include "globals_windows_x86.hpp" 9212657Skvn#endif 9312657Skvn#ifdef TARGET_OS_ARCH_linux_arm 9412657Skvn# include "globals_linux_arm.hpp" 9512657Skvn#endif 9612657Skvn#ifdef TARGET_OS_ARCH_linux_ppc 9712657Skvn# include "globals_linux_ppc.hpp" 9813175Siveresov#endif 9913175Siveresov#ifdef TARGET_OS_ARCH_linux_aarch64 10012657Skvn# include "globals_linux_aarch64.hpp" 10112657Skvn#endif 10212657Skvn#ifdef TARGET_OS_ARCH_aix_ppc 10312657Skvn# include "globals_aix_ppc.hpp" 10412657Skvn#endif 10512657Skvn#ifdef TARGET_OS_ARCH_bsd_x86 10612657Skvn# include "globals_bsd_x86.hpp" 10712657Skvn#endif 10812657Skvn#ifdef TARGET_OS_ARCH_bsd_zero 10912657Skvn# include "globals_bsd_zero.hpp" 11012657Skvn#endif 11112657Skvn#ifdef COMPILER1 11212657Skvn#ifdef TARGET_ARCH_x86 11312657Skvn# include "c1_globals_x86.hpp" 11412657Skvn#endif 11512657Skvn#ifdef TARGET_ARCH_sparc 11612657Skvn# include "c1_globals_sparc.hpp" 11712968Siveresov#endif 11812657Skvn#ifdef TARGET_ARCH_arm 11912657Skvn# include "c1_globals_arm.hpp" 12012657Skvn#endif 12112657Skvn#ifdef TARGET_ARCH_aarch64 12212657Skvn# include "c1_globals_aarch64.hpp" 12312657Skvn#endif 12412657Skvn#ifdef TARGET_OS_FAMILY_linux 12512657Skvn# include "c1_globals_linux.hpp" 12612657Skvn#endif 12712657Skvn#ifdef TARGET_OS_FAMILY_solaris 12812657Skvn# include "c1_globals_solaris.hpp" 12912657Skvn#endif 13012657Skvn#ifdef TARGET_OS_FAMILY_windows 13112657Skvn# include "c1_globals_windows.hpp" 13212657Skvn#endif 13312657Skvn#ifdef TARGET_OS_FAMILY_aix 13412657Skvn# include "c1_globals_aix.hpp" 13512657Skvn#endif 13612657Skvn#ifdef TARGET_OS_FAMILY_bsd 13712657Skvn# include "c1_globals_bsd.hpp" 13812657Skvn#endif 
13912657Skvn#ifdef TARGET_ARCH_ppc 14012657Skvn# include "c1_globals_ppc.hpp" 14112657Skvn#endif 14212657Skvn#endif 14312657Skvn#ifdef COMPILER2 14412657Skvn#ifdef TARGET_ARCH_x86 14512657Skvn# include "c2_globals_x86.hpp" 14612657Skvn#endif 14712657Skvn#ifdef TARGET_ARCH_sparc 14812657Skvn# include "c2_globals_sparc.hpp" 14912657Skvn#endif 15012657Skvn#ifdef TARGET_ARCH_arm 15112657Skvn# include "c2_globals_arm.hpp" 15212657Skvn#endif 15312657Skvn#ifdef TARGET_ARCH_ppc 15412657Skvn# include "c2_globals_ppc.hpp" 15512657Skvn#endif 15612657Skvn#ifdef TARGET_ARCH_aarch64 15712657Skvn# include "c2_globals_aarch64.hpp" 15812657Skvn#endif 15912657Skvn#ifdef TARGET_OS_FAMILY_linux 16012657Skvn# include "c2_globals_linux.hpp" 16112657Skvn#endif 16212657Skvn#ifdef TARGET_OS_FAMILY_solaris 16312657Skvn# include "c2_globals_solaris.hpp" 16412657Skvn#endif 16512657Skvn#ifdef TARGET_OS_FAMILY_windows 16612657Skvn# include "c2_globals_windows.hpp" 16712657Skvn#endif 16812657Skvn#ifdef TARGET_OS_FAMILY_aix 16912657Skvn# include "c2_globals_aix.hpp" 17012657Skvn#endif 17112657Skvn#ifdef TARGET_OS_FAMILY_bsd 17212657Skvn# include "c2_globals_bsd.hpp" 17312657Skvn#endif 17412657Skvn#endif 17512657Skvn#ifdef SHARK 17612657Skvn#ifdef TARGET_ARCH_zero 17712657Skvn# include "shark_globals_zero.hpp" 17812657Skvn#endif 17912657Skvn#endif 18012657Skvn 18112657Skvn#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !INCLUDE_JVMCI 18212657Skvndefine_pd_global(bool, BackgroundCompilation, false); 18312657Skvndefine_pd_global(bool, UseTLAB, false); 18412657Skvndefine_pd_global(bool, CICompileOSR, false); 18512657Skvndefine_pd_global(bool, UseTypeProfile, false); 18612657Skvndefine_pd_global(bool, UseOnStackReplacement, false); 18712657Skvndefine_pd_global(bool, InlineIntrinsics, false); 18812657Skvndefine_pd_global(bool, PreferInterpreterNativeStubs, true); 18912657Skvndefine_pd_global(bool, ProfileInterpreter, false); 19012657Skvndefine_pd_global(bool, ProfileTraps, false); 
19112657Skvndefine_pd_global(bool, TieredCompilation, false); 19212657Skvn 19312657Skvndefine_pd_global(intx, CompileThreshold, 0); 19412657Skvn 19512657Skvndefine_pd_global(intx, OnStackReplacePercentage, 0); 19612657Skvndefine_pd_global(bool, ResizeTLAB, false); 19712657Skvndefine_pd_global(intx, FreqInlineSize, 0); 19812657Skvndefine_pd_global(size_t, NewSizeThreadIncrease, 4*K); 19912657Skvndefine_pd_global(intx, InlineClassNatives, true); 20012657Skvndefine_pd_global(intx, InlineUnsafeOps, true); 20112657Skvndefine_pd_global(intx, InitialCodeCacheSize, 160*K); 20212657Skvndefine_pd_global(intx, ReservedCodeCacheSize, 32*M); 20312657Skvndefine_pd_global(intx, NonProfiledCodeHeapSize, 0); 20412657Skvndefine_pd_global(intx, ProfiledCodeHeapSize, 0); 20512657Skvndefine_pd_global(intx, NonNMethodCodeHeapSize, 32*M); 20612657Skvn 20712657Skvndefine_pd_global(intx, CodeCacheExpansionSize, 32*K); 20812657Skvndefine_pd_global(intx, CodeCacheMinBlockLength, 1); 20912657Skvndefine_pd_global(intx, CodeCacheMinimumUseSpace, 200*K); 21012657Skvndefine_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M)); 21112657Skvndefine_pd_global(bool, NeverActAsServerClassMachine, true); 21212657Skvndefine_pd_global(uint64_t,MaxRAM, 1ULL*G); 21312657Skvn#define CI_COMPILER_COUNT 0 21412657Skvn#else 21512657Skvn 21612657Skvn#if defined(COMPILER2) || INCLUDE_JVMCI 21712657Skvn#define CI_COMPILER_COUNT 2 21812657Skvn#else 21912657Skvn#define CI_COMPILER_COUNT 1 22012657Skvn#endif // COMPILER2 || INCLUDE_JVMCI 22112657Skvn 22212657Skvn#endif // no compilers 22312657Skvn 22412657Skvn// string type aliases used only in this file 22512657Skvntypedef const char* ccstr; 22612657Skvntypedef const char* ccstrlist; // represents string arguments which accumulate 22712657Skvn 22812657Skvnstruct Flag { 22912657Skvn enum Flags { 23012657Skvn // value origin 23112657Skvn DEFAULT = 0, 23212657Skvn COMMAND_LINE = 1, 23312657Skvn ENVIRON_VAR = 2, 23412657Skvn CONFIG_FILE = 3, 23512657Skvn MANAGEMENT = 
4, 23612657Skvn ERGONOMIC = 5, 23712657Skvn ATTACH_ON_DEMAND = 6, 23812657Skvn INTERNAL = 7, 23912657Skvn 24012657Skvn LAST_VALUE_ORIGIN = INTERNAL, 24112657Skvn VALUE_ORIGIN_BITS = 4, 24212657Skvn VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS), 24312657Skvn 24412657Skvn // flag kind 24512657Skvn KIND_PRODUCT = 1 << 4, 24612657Skvn KIND_MANAGEABLE = 1 << 5, 24712657Skvn KIND_DIAGNOSTIC = 1 << 6, 24812657Skvn KIND_EXPERIMENTAL = 1 << 7, 24912657Skvn KIND_NOT_PRODUCT = 1 << 8, 25012657Skvn KIND_DEVELOP = 1 << 9, 25112657Skvn KIND_PLATFORM_DEPENDENT = 1 << 10, 25212657Skvn KIND_READ_WRITE = 1 << 11, 25312657Skvn KIND_C1 = 1 << 12, 25412657Skvn KIND_C2 = 1 << 13, 25512657Skvn KIND_ARCH = 1 << 14, 25612657Skvn KIND_SHARK = 1 << 15, 25712657Skvn KIND_LP64_PRODUCT = 1 << 16, 25812657Skvn KIND_COMMERCIAL = 1 << 17, 25912657Skvn KIND_JVMCI = 1 << 18, 26012657Skvn 26112657Skvn KIND_MASK = ~VALUE_ORIGIN_MASK 26212657Skvn }; 26312657Skvn 26412657Skvn enum Error { 26512657Skvn // no error 26612657Skvn SUCCESS = 0, 26712657Skvn // flag name is missing 26812657Skvn MISSING_NAME, 26912657Skvn // flag value is missing 27012657Skvn MISSING_VALUE, 27112657Skvn // error parsing the textual form of the value 27212657Skvn WRONG_FORMAT, 27312657Skvn // flag is not writeable 27412657Skvn NON_WRITABLE, 27512657Skvn // flag value is outside of its bounds 27612657Skvn OUT_OF_BOUNDS, 27712657Skvn // flag value violates its constraint 27812657Skvn VIOLATES_CONSTRAINT, 27912657Skvn // there is no flag with the given name 28012657Skvn INVALID_FLAG, 28112657Skvn // other, unspecified error related to setting the flag 28212657Skvn ERR_OTHER 28312657Skvn }; 28412657Skvn 28512657Skvn enum MsgType { 28612657Skvn NONE = 0, 28712657Skvn DIAGNOSTIC_FLAG_BUT_LOCKED, 28812657Skvn EXPERIMENTAL_FLAG_BUT_LOCKED, 28912657Skvn DEVELOPER_FLAG_BUT_PRODUCT_BUILD, 29012657Skvn NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD 29112657Skvn }; 29212657Skvn 29312657Skvn const char* _type; 29412657Skvn const char* _name; 
29512657Skvn void* _addr; 29612657Skvn NOT_PRODUCT(const char* _doc;) 29712657Skvn Flags _flags; 29812657Skvn 29912657Skvn // points to all Flags static array 30012657Skvn static Flag* flags; 30112657Skvn 30212657Skvn // number of flags 30312657Skvn static size_t numFlags; 30412657Skvn 30512657Skvn static Flag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); }; 30612657Skvn static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false); 30712657Skvn static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); 30812657Skvn 30912657Skvn void check_writable(); 31012657Skvn 31112657Skvn bool is_bool() const; 31212657Skvn bool get_bool() const; 31312657Skvn void set_bool(bool value); 31412657Skvn 31512657Skvn bool is_int() const; 31612657Skvn int get_int() const; 31712657Skvn void set_int(int value); 31812657Skvn 31912657Skvn bool is_uint() const; 32012657Skvn uint get_uint() const; 32112657Skvn void set_uint(uint value); 32212657Skvn 32312657Skvn bool is_intx() const; 32412657Skvn intx get_intx() const; 32512657Skvn void set_intx(intx value); 32612657Skvn 32712657Skvn bool is_uintx() const; 32812657Skvn uintx get_uintx() const; 32912657Skvn void set_uintx(uintx value); 33012657Skvn 33112657Skvn bool is_uint64_t() const; 33212657Skvn uint64_t get_uint64_t() const; 33312657Skvn void set_uint64_t(uint64_t value); 33412657Skvn 33512657Skvn bool is_size_t() const; 33612657Skvn size_t get_size_t() const; 33712657Skvn void set_size_t(size_t value); 33812657Skvn 33912657Skvn bool is_double() const; 34012657Skvn double get_double() const; 34112657Skvn void set_double(double value); 34212657Skvn 34312657Skvn bool is_ccstr() const; 34412657Skvn bool ccstr_accumulates() const; 34512657Skvn ccstr get_ccstr() const; 34612657Skvn void set_ccstr(ccstr value); 34712657Skvn 34812657Skvn Flags get_origin(); 34912657Skvn void set_origin(Flags origin); 35012657Skvn 35112657Skvn bool 
is_default(); 35212657Skvn bool is_ergonomic(); 35312968Siveresov bool is_command_line(); 35412968Siveresov 35512968Siveresov bool is_product() const; 35612968Siveresov bool is_manageable() const; 35712968Siveresov bool is_diagnostic() const; 35812657Skvn bool is_experimental() const; 35912657Skvn bool is_notproduct() const; 36012657Skvn bool is_develop() const; 36112657Skvn bool is_read_write() const; 36212657Skvn bool is_commercial() const; 36312657Skvn 36412657Skvn bool is_constant_in_binary() const; 36512657Skvn 36612657Skvn bool is_unlocker() const; 36712657Skvn bool is_unlocked() const; 36812657Skvn bool is_writeable() const; 36912657Skvn bool is_external() const; 37012657Skvn 37112657Skvn bool is_unlocker_ext() const; 37212657Skvn bool is_unlocked_ext() const; 37312657Skvn bool is_writeable_ext() const; 37412657Skvn bool is_external_ext() const; 37512657Skvn 37612657Skvn void unlock_diagnostic(); 37712657Skvn 37812657Skvn Flag::MsgType get_locked_message(char*, int) const; 37912657Skvn void get_locked_message_ext(char*, int) const; 38012657Skvn 38112657Skvn // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 38212657Skvn void print_on(outputStream* st, bool withComments = false, bool printRanges = false); 38312657Skvn void print_kind(outputStream* st); 38412657Skvn void print_as_flag(outputStream* st); 38512657Skvn 38612657Skvn static const char* flag_error_str(Flag::Error error); 38712657Skvn}; 38812657Skvn 38912657Skvn// debug flags control various aspects of the VM and are global accessible 39012657Skvn 39112657Skvn// use FlagSetting to temporarily change some debug flag 39212657Skvn// e.g. 
FlagSetting fs(DebugThisAndThat, true); 39312657Skvn// restored to previous value upon leaving scope 39412657Skvnclass FlagSetting { 39512657Skvn bool val; 39612657Skvn bool* flag; 39712657Skvn public: 39812657Skvn FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; } 39912657Skvn ~FlagSetting() { *flag = val; } 40012657Skvn}; 40112657Skvn 40212657Skvn 40312657Skvnclass CounterSetting { 40412657Skvn intx* counter; 40512657Skvn public: 40612657Skvn CounterSetting(intx* cnt) { counter = cnt; (*counter)++; } 40712657Skvn ~CounterSetting() { (*counter)--; } 40812657Skvn}; 40912657Skvn 41012657Skvnclass IntFlagSetting { 41112657Skvn int val; 41212657Skvn int* flag; 41312657Skvn public: 41412657Skvn IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; } 41512657Skvn ~IntFlagSetting() { *flag = val; } 41612657Skvn}; 41712657Skvn 41812657Skvnclass UIntFlagSetting { 41912657Skvn uint val; 42012657Skvn uint* flag; 42112657Skvn public: 42212657Skvn UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val = fl; fl = newValue; } 42312657Skvn ~UIntFlagSetting() { *flag = val; } 42412657Skvn}; 42512657Skvn 42612657Skvnclass UIntXFlagSetting { 42712657Skvn uintx val; 42812657Skvn uintx* flag; 42912657Skvn public: 43012657Skvn UIntXFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; } 43112657Skvn ~UIntXFlagSetting() { *flag = val; } 43212657Skvn}; 43312657Skvn 43412657Skvnclass DoubleFlagSetting { 43512657Skvn double val; 43612657Skvn double* flag; 43712657Skvn public: 43812657Skvn DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; } 43912657Skvn ~DoubleFlagSetting() { *flag = val; } 44012657Skvn}; 44112657Skvn 44212657Skvnclass SizeTFlagSetting { 44312657Skvn size_t val; 44412657Skvn size_t* flag; 44512657Skvn public: 44612657Skvn SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; } 44712657Skvn ~SizeTFlagSetting() { *flag = val; } 
44812657Skvn}; 44912657Skvn 45012657Skvn 45112657Skvnclass CommandLineFlags { 45212657Skvnpublic: 45312657Skvn static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false); 45412657Skvn static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); } 45512657Skvn static Flag::Error boolAtPut(Flag* flag, bool* value, Flag::Flags origin); 45612657Skvn static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin); 45712657Skvn static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } 45812657Skvn 45912968Siveresov static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false); 46012657Skvn static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); } 46112657Skvn static Flag::Error intAtPut(Flag* flag, int* value, Flag::Flags origin); 46212657Skvn static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin); 46312657Skvn static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); } 46412657Skvn 46512657Skvn static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false); 46612657Skvn static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); } 46712657Skvn static Flag::Error uintAtPut(Flag* flag, uint* value, Flag::Flags origin); 46812657Skvn static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin); 
46912657Skvn static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); } 47012657Skvn 47112657Skvn static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false); 47212657Skvn static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); } 47312657Skvn static Flag::Error intxAtPut(Flag* flag, intx* value, Flag::Flags origin); 47412657Skvn static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin); 47512657Skvn static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } 47612657Skvn 47712657Skvn static Flag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false); 47812657Skvn static Flag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); } 47912657Skvn static Flag::Error uintxAtPut(Flag* flag, uintx* value, Flag::Flags origin); 48012657Skvn static Flag::Error uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin); 48113175Siveresov static Flag::Error uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); } 48213175Siveresov 48312657Skvn static Flag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false); 48412657Skvn static Flag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); } 48512657Skvn static Flag::Error size_tAtPut(Flag* flag, size_t* value, Flag::Flags origin); 
48612657Skvn static Flag::Error size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin); 48712657Skvn static Flag::Error size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); } 48812657Skvn 48912657Skvn static Flag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false); 49012657Skvn static Flag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); } 49112657Skvn static Flag::Error uint64_tAtPut(Flag* flag, uint64_t* value, Flag::Flags origin); 49212657Skvn static Flag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin); 49312657Skvn static Flag::Error uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } 49412657Skvn 49512657Skvn static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false); 49612657Skvn static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); } 49712657Skvn static Flag::Error doubleAtPut(Flag* flag, double* value, Flag::Flags origin); 49812657Skvn static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin); 49912657Skvn static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } 50012657Skvn 50112657Skvn static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false); 50212657Skvn static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { 
return ccstrAt(name, strlen(name), value, allow_locked, return_flag); } 50312657Skvn // Contract: Flag will make private copy of the incoming value. 50412657Skvn // Outgoing value is always malloc-ed, and caller MUST call free. 50512657Skvn static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin); 50612657Skvn static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); } 50712657Skvn 50812657Skvn // Returns false if name is not a command line flag. 50912657Skvn static bool wasSetOnCmdline(const char* name, bool* value); 51012657Skvn static void printSetFlags(outputStream* out); 51112657Skvn 51212657Skvn // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 51312657Skvn static void printFlags(outputStream* out, bool withComments, bool printRanges = false); 51412657Skvn 51512657Skvn static void verify() PRODUCT_RETURN; 51612657Skvn}; 51712657Skvn 51812657Skvn// use this for flags that are true by default in the debug version but 51912657Skvn// false in the optimized version, and vice versa 52012657Skvn#ifdef ASSERT 52112657Skvn#define trueInDebug true 52212657Skvn#define falseInDebug false 52312657Skvn#else 52412657Skvn#define trueInDebug false 52512657Skvn#define falseInDebug true 52612657Skvn#endif 52712657Skvn 52812657Skvn// use this for flags that are true per default in the product build 52912657Skvn// but false in development builds, and vice versa 53012657Skvn#ifdef PRODUCT 53112657Skvn#define trueInProduct true 53212657Skvn#define falseInProduct false 53312657Skvn#else 53412657Skvn#define trueInProduct false 53512657Skvn#define falseInProduct true 53612657Skvn#endif 53712657Skvn 53812657Skvn#ifdef JAVASE_EMBEDDED 53912657Skvn#define falseInEmbedded false 54012657Skvn#else 54112657Skvn#define falseInEmbedded true 54212657Skvn#endif 54312657Skvn 54412657Skvn// develop flags are settable / visible only 
// during development and are constant in the PRODUCT version
// product flags are always settable / visible
// notproduct flags are settable / visible only during development and are not declared in the PRODUCT version

// A flag must be declared with one of the following types:
// bool, int, uint, intx, uintx, size_t, ccstr, double, or uint64_t.
// The type "ccstr" is an alias for "const char*" and is used
// only in this file, because the macrology requires single-token type names.

// Note: Diagnostic options not meant for VM tuning or for product modes.
// They are to be used for VM quality assurance or field diagnosis
// of VM bugs.  They are hidden so that users will not be encouraged to
// try them as if they were VM ordinary execution options.  However, they
// are available in the product version of the VM.  Under instruction
// from support engineers, VM customers can turn them on to collect
// diagnostic information about VM problems.  To use a VM diagnostic
// option, you must first specify +UnlockDiagnosticVMOptions.
// (This master switch also affects the behavior of -Xprintflags.)
//
// experimental flags are in support of features that are not
// part of the officially supported product, but are available
// for experimenting with.  They could, for example, be performance
// features that may not have undergone full or rigorous QA, but which may
// help performance in some cases and released for experimentation
// by the community of users and developers.  This flag also allows one to
// be able to build a fully supported product that nonetheless also
// ships with some unsupported, lightly tested, experimental features.
// Like the UnlockDiagnosticVMOptions flag above, there is a corresponding
// UnlockExperimentalVMOptions flag, which allows the control and
// modification of the experimental flags.
//
// Nota bene: neither diagnostic nor experimental options should be used casually,
// and they are not supported on production loads, except under explicit
// direction from support engineers.
//
// manageable flags are writeable external product flags.
// They are dynamically writeable through the JDK management interface
// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole.
// These flags are external exported interface (see CCC).  The list of
// manageable flags can be queried programmatically through the management
// interface.
//
// A flag can be made as "manageable" only if
// - the flag is defined in a CCC as an external exported interface.
// - the VM implementation supports dynamic setting of the flag.
//   This implies that the VM must *always* query the flag variable
//   and not reuse state related to the flag state at any given time.
// - you want the flag to be queried programmatically by the customers.
//
// product_rw flags are writeable internal product flags.
// They are like "manageable" flags but for internal/private use.
// The list of product_rw flags are internal/private flags which
// may be changed/removed in a future release.  It can be set
// through the management interface to get/set value
// when the name of flag is supplied.
//
// A flag can be made as "product_rw" only if
// - the VM implementation supports dynamic setting of the flag.
60212657Skvn// This implies that the VM must *always* query the flag variable 60312657Skvn// and not reuse state related to the flag state at any given time. 60412657Skvn// 60512657Skvn// Note that when there is a need to support develop flags to be writeable, 60612657Skvn// it can be done in the same way as product_rw. 60712657Skvn// 60812657Skvn// range is a macro that will expand to min and max arguments for range 60912657Skvn// checking code if provided - see commandLineFlagRangeList.hpp 61012657Skvn// 61112657Skvn// constraint is a macro that will expand to custom function call 61212657Skvn// for constraint checking if provided - see commandLineFlagConstraintList.hpp 61312657Skvn// 61412657Skvn 61512657Skvn#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product, range, constraint) \ 61612657Skvn \ 61712657Skvn lp64_product(bool, UseCompressedOops, false, \ 61812657Skvn "Use 32-bit object references in 64-bit VM. " \ 61912657Skvn "lp64_product means flag is always constant in 32 bit VM") \ 62012657Skvn \ 62112657Skvn lp64_product(bool, UseCompressedClassPointers, false, \ 62212657Skvn "Use 32-bit class pointers in 64-bit VM. " \ 62312657Skvn "lp64_product means flag is always constant in 32 bit VM") \ 62412657Skvn \ 62512657Skvn notproduct(bool, CheckCompressedOops, true, \ 62612657Skvn "Generate checks in encoding/decoding code in debug VM") \ 62712657Skvn \ 62812657Skvn product_pd(size_t, HeapBaseMinAddress, \ 62912657Skvn "OS specific low limit for heap base address") \ 63012657Skvn \ 63112657Skvn product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \ 63212657Skvn "Heap allocation steps through preferred address regions to find" \ 63312657Skvn " where it can allocate the heap. 
Number of steps to take per " \ 63412657Skvn "region.") \ 63512657Skvn range(1, max_uintx) \ 63612657Skvn \ 63712657Skvn diagnostic(bool, PrintCompressedOopsMode, false, \ 63812657Skvn "Print compressed oops base address and encoding mode") \ 63912657Skvn \ 64012657Skvn lp64_product(intx, ObjectAlignmentInBytes, 8, \ 64112657Skvn "Default object alignment in bytes, 8 is minimum") \ 64212657Skvn range(8, 256) \ 64312657Skvn constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \ 64412657Skvn \ 64512657Skvn product(bool, AssumeMP, false, \ 64612657Skvn "Instruct the VM to assume multiple processors are available") \ 64712657Skvn \ 64812657Skvn /* UseMembar is theoretically a temp flag used for memory barrier */ \ 64912657Skvn /* removal testing. It was supposed to be removed before FCS but has */ \ 65012657Skvn /* been re-added (see 6401008) */ \ 65112657Skvn product_pd(bool, UseMembar, \ 65212657Skvn "(Unstable) Issues membars on thread state transitions") \ 65312657Skvn \ 65412657Skvn develop(bool, CleanChunkPoolAsync, falseInEmbedded, \ 65512657Skvn "Clean the chunk pool asynchronously") \ 65612657Skvn \ 65712657Skvn experimental(bool, AlwaysSafeConstructors, false, \ 65812657Skvn "Force safe construction, as if all fields are final.") \ 65912657Skvn \ 66012657Skvn diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \ 66112657Skvn "Enable normal processing of flags relating to field diagnostics")\ 66212657Skvn \ 66312657Skvn experimental(bool, UnlockExperimentalVMOptions, false, \ 66412657Skvn "Enable normal processing of flags relating to experimental " \ 66512657Skvn "features") \ 66612657Skvn \ 66712657Skvn product(bool, JavaMonitorsInStackTrace, true, \ 66812657Skvn "Print information about Java monitor locks when the stacks are" \ 66912657Skvn "dumped") \ 67012657Skvn \ 67112657Skvn product_pd(bool, UseLargePages, \ 67212657Skvn "Use large page memory") \ 67312657Skvn \ 67412657Skvn product_pd(bool, UseLargePagesIndividualAllocation, \ 67512657Skvn 
"Allocate large pages individually for better affinity") \ 67612657Skvn \ 67712657Skvn develop(bool, LargePagesIndividualAllocationInjectError, false, \ 67812657Skvn "Fail large pages individual allocation") \ 67912657Skvn \ 68012657Skvn product(bool, UseLargePagesInMetaspace, false, \ 68112657Skvn "Use large page memory in metaspace. " \ 68212657Skvn "Only used if UseLargePages is enabled.") \ 68312657Skvn \ 68412657Skvn develop(bool, TracePageSizes, false, \ 68512657Skvn "Trace page size selection and usage") \ 68612657Skvn \ 68712657Skvn product(bool, UseNUMA, false, \ 68812657Skvn "Use NUMA if available") \ 68912657Skvn \ 69012657Skvn product(bool, UseNUMAInterleaving, false, \ 69112657Skvn "Interleave memory across NUMA nodes if available") \ 69212657Skvn \ 69312657Skvn product(size_t, NUMAInterleaveGranularity, 2*M, \ 69412657Skvn "Granularity to use for NUMA interleaving on Windows OS") \ 69512657Skvn \ 69612657Skvn product(bool, ForceNUMA, false, \ 69712657Skvn "Force NUMA optimizations on single-node/UMA systems") \ 69812657Skvn \ 69912657Skvn product(uintx, NUMAChunkResizeWeight, 20, \ 70012657Skvn "Percentage (0-100) used to weight the current sample when " \ 70112657Skvn "computing exponentially decaying average for " \ 70212657Skvn "AdaptiveNUMAChunkSizing") \ 70312657Skvn range(0, 100) \ 70412657Skvn \ 70512657Skvn product(size_t, NUMASpaceResizeRate, 1*G, \ 70612657Skvn "Do not reallocate more than this amount per collection") \ 70712657Skvn \ 70812657Skvn product(bool, UseAdaptiveNUMAChunkSizing, true, \ 70912657Skvn "Enable adaptive chunk sizing for NUMA") \ 71012657Skvn \ 71112657Skvn product(bool, NUMAStats, false, \ 71212657Skvn "Print NUMA stats in detailed heap information") \ 71312657Skvn \ 71412657Skvn product(uintx, NUMAPageScanRate, 256, \ 71512657Skvn "Maximum number of pages to include in the page scan procedure") \ 71612657Skvn \ 71712657Skvn product_pd(bool, NeedsDeoptSuspend, \ 71812657Skvn "True for register window machines 
(sparc/ia64)") \ 71912657Skvn \ 72012657Skvn product(intx, UseSSE, 99, \ 72112657Skvn "Highest supported SSE instructions set on x86/x64") \ 72212657Skvn range(0, 99) \ 72312657Skvn \ 72412657Skvn product(bool, UseAES, false, \ 72512657Skvn "Control whether AES instructions can be used on x86/x64") \ 72612657Skvn \ 72712657Skvn product(bool, UseSHA, false, \ 72812657Skvn "Control whether SHA instructions can be used " \ 72912657Skvn "on SPARC and on ARM") \ 73012657Skvn \ 73112657Skvn product(bool, UseGHASHIntrinsics, false, \ 73212657Skvn "Use intrinsics for GHASH versions of crypto") \ 73312657Skvn \ 73412657Skvn product(size_t, LargePageSizeInBytes, 0, \ 73512657Skvn "Large page size (0 to let VM choose the page size)") \ 73612657Skvn \ 73712657Skvn product(size_t, LargePageHeapSizeThreshold, 128*M, \ 73812657Skvn "Use large pages if maximum heap is at least this big") \ 73912657Skvn \ 74012657Skvn product(bool, ForceTimeHighResolution, false, \ 74112657Skvn "Using high time resolution (for Win32 only)") \ 74212657Skvn \ 74312657Skvn develop(bool, TraceItables, false, \ 74412657Skvn "Trace initialization and use of itables") \ 74512657Skvn \ 74612657Skvn develop(bool, TracePcPatching, false, \ 74712657Skvn "Trace usage of frame::patch_pc") \ 74812657Skvn \ 74912657Skvn develop(bool, TraceJumps, false, \ 75012657Skvn "Trace assembly jumps in thread ring buffer") \ 75112657Skvn \ 75212657Skvn develop(bool, TraceRelocator, false, \ 75312657Skvn "Trace the bytecode relocator") \ 75412657Skvn \ 75512657Skvn develop(bool, TraceLongCompiles, false, \ 75612657Skvn "Print out every time compilation is longer than " \ 75712657Skvn "a given threshold") \ 75812657Skvn \ 75912657Skvn develop(bool, SafepointALot, false, \ 76012657Skvn "Generate a lot of safepoints. 
This works with " \ 76112657Skvn "GuaranteedSafepointInterval") \ 76212657Skvn \ 76312657Skvn product_pd(bool, BackgroundCompilation, \ 76412657Skvn "A thread requesting compilation is not blocked during " \ 76512657Skvn "compilation") \ 76612657Skvn \ 76712657Skvn product(bool, PrintVMQWaitTime, false, \ 76812657Skvn "Print out the waiting time in VM operation queue") \ 76912657Skvn \ 77012657Skvn develop(bool, TraceOopMapGeneration, false, \ 77112657Skvn "Show OopMapGeneration") \ 77212657Skvn \ 77312657Skvn product(bool, MethodFlushing, true, \ 77412657Skvn "Reclamation of zombie and not-entrant methods") \ 77512657Skvn \ 77612657Skvn develop(bool, VerifyStack, false, \ 77712657Skvn "Verify stack of each thread when it is entering a runtime call") \ 77812657Skvn \ 77912657Skvn diagnostic(bool, ForceUnreachable, false, \ 78012657Skvn "Make all non code cache addresses to be unreachable by " \ 78112657Skvn "forcing use of 64bit literal fixups") \ 78212657Skvn \ 78312657Skvn notproduct(bool, StressDerivedPointers, false, \ 78412657Skvn "Force scavenge when a derived pointer is detected on stack " \ 78512657Skvn "after rtm call") \ 78612657Skvn \ 78712657Skvn develop(bool, TraceDerivedPointers, false, \ 78812657Skvn "Trace traversal of derived pointers on stack") \ 78912657Skvn \ 79012657Skvn notproduct(bool, TraceCodeBlobStacks, false, \ 79112657Skvn "Trace stack-walk of codeblobs") \ 79212657Skvn \ 79312657Skvn product(bool, PrintJNIResolving, false, \ 79412657Skvn "Used to implement -v:jni") \ 79512657Skvn \ 79612657Skvn notproduct(bool, PrintRewrites, false, \ 79712657Skvn "Print methods that are being rewritten") \ 79812657Skvn \ 79912657Skvn product(bool, UseInlineCaches, true, \ 80012657Skvn "Use Inline Caches for virtual calls ") \ 80112657Skvn \ 80212657Skvn develop(bool, InlineArrayCopy, true, \ 80312657Skvn "Inline arraycopy native that is known to be part of " \ 80412657Skvn "base library DLL") \ 80512657Skvn \ 80612657Skvn develop(bool, 
InlineObjectHash, true, \ 80712657Skvn "Inline Object::hashCode() native that is known to be part " \ 80812657Skvn "of base library DLL") \ 80912657Skvn \ 81012657Skvn develop(bool, InlineNatives, true, \ 81112657Skvn "Inline natives that are known to be part of base library DLL") \ 81212657Skvn \ 81312657Skvn develop(bool, InlineMathNatives, true, \ 81412657Skvn "Inline SinD, CosD, etc.") \ 81512657Skvn \ 81613175Siveresov develop(bool, InlineClassNatives, true, \ 81713175Siveresov "Inline Class.isInstance, etc") \ 81813175Siveresov \ 81913175Siveresov develop(bool, InlineThreadNatives, true, \ 82013175Siveresov "Inline Thread.currentThread, etc") \ 82113175Siveresov \ 82213175Siveresov develop(bool, InlineUnsafeOps, true, \ 82313175Siveresov "Inline memory ops (native methods) from Unsafe") \ 82413175Siveresov \ 82513175Siveresov product(bool, CriticalJNINatives, true, \ 82613175Siveresov "Check for critical JNI entry points") \ 82713175Siveresov \ 82813175Siveresov notproduct(bool, StressCriticalJNINatives, false, \ 82913175Siveresov "Exercise register saving code in critical natives") \ 83013175Siveresov \ 83113175Siveresov product(bool, UseSSE42Intrinsics, false, \ 83213175Siveresov "SSE4.2 versions of intrinsics") \ 83313175Siveresov \ 83413175Siveresov product(bool, UseAESIntrinsics, false, \ 83513175Siveresov "Use intrinsics for AES versions of crypto") \ 83613175Siveresov \ 83713175Siveresov product(bool, UseSHA1Intrinsics, false, \ 83813175Siveresov "Use intrinsics for SHA-1 crypto hash function. " \ 83913175Siveresov "Requires that UseSHA is enabled.") \ 84013175Siveresov \ 84113175Siveresov product(bool, UseSHA256Intrinsics, false, \ 84213175Siveresov "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \ 84313175Siveresov "Requires that UseSHA is enabled.") \ 84413175Siveresov \ 84513175Siveresov product(bool, UseSHA512Intrinsics, false, \ 84613175Siveresov "Use intrinsics for SHA-384 and SHA-512 crypto hash functions. 
" \ 84713175Siveresov "Requires that UseSHA is enabled.") \ 84813175Siveresov \ 84913175Siveresov product(bool, UseCRC32Intrinsics, false, \ 85013175Siveresov "use intrinsics for java.util.zip.CRC32") \ 85113175Siveresov \ 85213175Siveresov product(bool, UseCRC32CIntrinsics, false, \ 85313175Siveresov "use intrinsics for java.util.zip.CRC32C") \ 85413175Siveresov \ 85513175Siveresov product(bool, UseAdler32Intrinsics, false, \ 85613175Siveresov "use intrinsics for java.util.zip.Adler32") \ 85713175Siveresov \ 85813175Siveresov diagnostic(ccstrlist, DisableIntrinsic, "", \ 85913175Siveresov "do not expand intrinsics whose (internal) names appear here") \ 86013175Siveresov \ 86113175Siveresov develop(bool, TraceCallFixup, false, \ 86213175Siveresov "Trace all call fixups") \ 86313175Siveresov \ 86413175Siveresov develop(bool, DeoptimizeALot, false, \ 86513175Siveresov "Deoptimize at every exit from the runtime system") \ 86613175Siveresov \ 86713175Siveresov notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ 86813175Siveresov "A comma separated list of bcis to deoptimize at") \ 86913175Siveresov \ 87013175Siveresov product(bool, DeoptimizeRandom, false, \ 87113175Siveresov "Deoptimize random frames on random exit from the runtime system")\ 87213175Siveresov \ 87313175Siveresov notproduct(bool, ZombieALot, false, \ 87413175Siveresov "Create zombies (non-entrant) at exit from the runtime system") \ 87513175Siveresov \ 87613175Siveresov product(bool, UnlinkSymbolsALot, false, \ 87713175Siveresov "Unlink unreferenced symbols from the symbol table at safepoints")\ 87813175Siveresov \ 87913175Siveresov notproduct(bool, WalkStackALot, false, \ 88013175Siveresov "Trace stack (no print) at every exit from the runtime system") \ 88113175Siveresov \ 88213175Siveresov product(bool, Debugging, false, \ 88313175Siveresov "Set when executing debug methods in debug.cpp " \ 88413175Siveresov "(to prevent triggering assertions)") \ 88513175Siveresov \ 88613175Siveresov notproduct(bool, 
StrictSafepointChecks, trueInDebug, \ 88713175Siveresov "Enable strict checks that safepoints cannot happen for threads " \ 88813175Siveresov "that use No_Safepoint_Verifier") \ 88913175Siveresov \ 89013175Siveresov notproduct(bool, VerifyLastFrame, false, \ 89113175Siveresov "Verify oops on last frame on entry to VM") \ 89212657Skvn \ 89312657Skvn develop(bool, TraceHandleAllocation, false, \ 89412657Skvn "Print out warnings when suspiciously many handles are allocated")\ 89512657Skvn \ 89612657Skvn product(bool, FailOverToOldVerifier, true, \ 89712657Skvn "Fail over to old verifier when split verifier fails") \ 89812657Skvn \ 89912657Skvn develop(bool, ShowSafepointMsgs, false, \ 90012657Skvn "Show message about safepoint synchronization") \ 90112657Skvn \ 90212657Skvn product(bool, SafepointTimeout, false, \ 90312657Skvn "Time out and warn or fail after SafepointTimeoutDelay " \ 90412657Skvn "milliseconds if failed to reach safepoint") \ 90512657Skvn \ 90612657Skvn develop(bool, DieOnSafepointTimeout, false, \ 90712657Skvn "Die upon failure to reach safepoint (see SafepointTimeout)") \ 90812657Skvn \ 90912657Skvn /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \ 91012657Skvn /* typically, at most a few retries are needed */ \ 91112657Skvn product(intx, SuspendRetryCount, 50, \ 91212657Skvn "Maximum retry count for an external suspend request") \ 91312657Skvn range(0, max_intx) \ 91412657Skvn \ 91512657Skvn product(intx, SuspendRetryDelay, 5, \ 91612657Skvn "Milliseconds to delay per retry (* current_retry_count)") \ 91712657Skvn range(0, max_intx) \ 91812657Skvn \ 91912657Skvn product(bool, AssertOnSuspendWaitFailure, false, \ 92012657Skvn "Assert/Guarantee on external suspend wait failure") \ 92112657Skvn \ 92212657Skvn product(bool, TraceSuspendWaitFailures, false, \ 92312657Skvn "Trace external suspend wait failures") \ 92412657Skvn \ 92512657Skvn product(bool, MaxFDLimit, true, \ 92612657Skvn "Bump the number of file descriptors to 
maximum in Solaris") \ 92712657Skvn \ 92812657Skvn diagnostic(bool, LogEvents, true, \ 92912657Skvn "Enable the various ring buffer event logs") \ 93012657Skvn \ 93112657Skvn diagnostic(uintx, LogEventsBufferEntries, 10, \ 93212657Skvn "Number of ring buffer event logs") \ 93312657Skvn range(1, NOT_LP64(1*K) LP64_ONLY(1*M)) \ 93412657Skvn \ 93512657Skvn product(bool, BytecodeVerificationRemote, true, \ 93612657Skvn "Enable the Java bytecode verifier for remote classes") \ 93712657Skvn \ 93812657Skvn product(bool, BytecodeVerificationLocal, false, \ 93912657Skvn "Enable the Java bytecode verifier for local classes") \ 94012657Skvn \ 94112657Skvn develop(bool, ForceFloatExceptions, trueInDebug, \ 94212657Skvn "Force exceptions on FP stack under/overflow") \ 94312657Skvn \ 94412657Skvn develop(bool, VerifyStackAtCalls, false, \ 94512657Skvn "Verify that the stack pointer is unchanged after calls") \ 94612657Skvn \ 94712657Skvn develop(bool, TraceJavaAssertions, false, \ 94812657Skvn "Trace java language assertions") \ 94912657Skvn \ 95012657Skvn notproduct(bool, CheckAssertionStatusDirectives, false, \ 95112657Skvn "Temporary - see javaClasses.cpp") \ 95212657Skvn \ 95312657Skvn notproduct(bool, PrintMallocFree, false, \ 95412657Skvn "Trace calls to C heap malloc/free allocation") \ 95512657Skvn \ 95612657Skvn product(bool, PrintOopAddress, false, \ 95712657Skvn "Always print the location of the oop") \ 95812657Skvn \ 95912657Skvn notproduct(bool, VerifyCodeCache, false, \ 96012657Skvn "Verify code cache on memory allocation/deallocation") \ 96112657Skvn \ 96212657Skvn develop(bool, UseMallocOnly, false, \ 96312657Skvn "Use only malloc/free for allocation (no resource area/arena)") \ 96412657Skvn \ 96512657Skvn develop(bool, PrintMalloc, false, \ 96612657Skvn "Print all malloc/free calls") \ 96712657Skvn \ 96812657Skvn develop(bool, PrintMallocStatistics, false, \ 96912657Skvn "Print malloc/free statistics") \ 97012657Skvn \ 97112657Skvn develop(bool, ZapResourceArea, 
trueInDebug, \ 97212657Skvn "Zap freed resource/arena space with 0xABABABAB") \ 97312657Skvn \ 97412657Skvn notproduct(bool, ZapVMHandleArea, trueInDebug, \ 97512657Skvn "Zap freed VM handle space with 0xBCBCBCBC") \ 97612657Skvn \ 97712657Skvn develop(bool, ZapJNIHandleArea, trueInDebug, \ 97812657Skvn "Zap freed JNI handle space with 0xFEFEFEFE") \ 97912657Skvn \ 98012657Skvn notproduct(bool, ZapStackSegments, trueInDebug, \ 98112657Skvn "Zap allocated/freed stack segments with 0xFADFADED") \ 98212657Skvn \ 98312657Skvn develop(bool, ZapUnusedHeapArea, trueInDebug, \ 98412657Skvn "Zap unused heap space with 0xBAADBABE") \ 98512657Skvn \ 98612657Skvn develop(bool, TraceZapUnusedHeapArea, false, \ 98712657Skvn "Trace zapping of unused heap space") \ 98812657Skvn \ 98912657Skvn develop(bool, CheckZapUnusedHeapArea, false, \ 99012657Skvn "Check zapping of unused heap space") \ 99112657Skvn \ 99212657Skvn develop(bool, ZapFillerObjects, trueInDebug, \ 99312657Skvn "Zap filler objects with 0xDEAFBABE") \ 99412657Skvn \ 99512657Skvn develop(bool, PrintVMMessages, true, \ 99612657Skvn "Print VM messages on console") \ 99712657Skvn \ 99812657Skvn product(bool, PrintGCApplicationConcurrentTime, false, \ 99912657Skvn "Print the time the application has been running") \ 100012657Skvn \ 100112657Skvn product(bool, PrintGCApplicationStoppedTime, false, \ 100212657Skvn "Print the time the application has been stopped") \ 100312657Skvn \ 100412657Skvn diagnostic(bool, VerboseVerification, false, \ 100512657Skvn "Display detailed verification details") \ 100612657Skvn \ 100712657Skvn notproduct(uintx, ErrorHandlerTest, 0, \ 100812657Skvn "If > 0, provokes an error after VM initialization; the value " \ 100912657Skvn "determines which error to provoke. 
See test_error_handler() " \ 101012657Skvn "in debug.cpp.") \ 101112657Skvn \ 101212657Skvn notproduct(uintx, TestCrashInErrorHandler, 0, \ 101312657Skvn "If > 0, provokes an error inside VM error handler (a secondary " \ 101413264Siveresov "crash). see test_error_handler() in debug.cpp.") \ 101513264Siveresov \ 101613264Siveresov notproduct(bool, TestSafeFetchInErrorHandler, false, \ 101713264Siveresov "If true, tests SafeFetch inside error handler.") \ 101813264Siveresov \ 101913264Siveresov develop(bool, Verbose, false, \ 102013264Siveresov "Print additional debugging information from other modes") \ 102113264Siveresov \ 102213264Siveresov develop(bool, PrintMiscellaneous, false, \ 102313264Siveresov "Print uncategorized debugging information (requires +Verbose)") \ 102413264Siveresov \ 102513264Siveresov develop(bool, WizardMode, false, \ 102613264Siveresov "Print much more debugging information") \ 102713264Siveresov \ 102813264Siveresov product(bool, ShowMessageBoxOnError, false, \ 102913264Siveresov "Keep process alive on VM fatal error") \ 103013264Siveresov \ 103113264Siveresov product(bool, CreateCoredumpOnCrash, true, \ 103213264Siveresov "Create core/mini dump on VM fatal error") \ 103313264Siveresov \ 103413264Siveresov product(uint64_t, ErrorLogTimeout, 2 * 60, \ 103513264Siveresov "Timeout, in seconds, to limit the time spent on writing an " \ 103613264Siveresov "error log in case of a crash.") \ 103713264Siveresov range(0, (uint64_t)max_jlong/1000) \ 103813264Siveresov \ 103913264Siveresov product_pd(bool, UseOSErrorReporting, \ 104013264Siveresov "Let VM fatal error propagate to the OS (ie. 
WER on Windows)") \ 104113264Siveresov \ 104213264Siveresov product(bool, SuppressFatalErrorMessage, false, \ 104313264Siveresov "Report NO fatal error message (avoid deadlock)") \ 104413264Siveresov \ 104513264Siveresov product(ccstrlist, OnError, "", \ 104613264Siveresov "Run user-defined commands on fatal error; see VMError.cpp " \ 104713264Siveresov "for examples") \ 104813264Siveresov \ 104913264Siveresov product(ccstrlist, OnOutOfMemoryError, "", \ 105013264Siveresov "Run user-defined commands on first java.lang.OutOfMemoryError") \ 105113264Siveresov \ 105213264Siveresov manageable(bool, HeapDumpBeforeFullGC, false, \ 105313264Siveresov "Dump heap to file before any major stop-the-world GC") \ 105413264Siveresov \ 105513264Siveresov manageable(bool, HeapDumpAfterFullGC, false, \ 105613264Siveresov "Dump heap to file after any major stop-the-world GC") \ 105713264Siveresov \ 105813264Siveresov manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 105913264Siveresov "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 106013264Siveresov \ 106113264Siveresov manageable(ccstr, HeapDumpPath, NULL, \ 106213264Siveresov "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ 106313264Siveresov "directory) of the dump file (defaults to java_pid<pid>.hprof " \ 106413264Siveresov "in the working directory)") \ 106513264Siveresov \ 106613264Siveresov develop(size_t, SegmentedHeapDumpThreshold, 2*G, \ 106713264Siveresov "Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) " \ 106813264Siveresov "when the heap usage is larger than this") \ 106913264Siveresov \ 107013264Siveresov develop(size_t, HeapDumpSegmentSize, 1*G, \ 107113264Siveresov "Approximate segment size when generating a segmented heap dump") \ 107213264Siveresov \ 107313264Siveresov develop(bool, BreakAtWarning, false, \ 107413264Siveresov "Execute breakpoint upon encountering VM warning") \ 107513264Siveresov \ 107613264Siveresov develop(bool, UseFakeTimers, false, \ 
107713264Siveresov "Tell whether the VM should use system time or a fake timer") \ 107813264Siveresov \ 107913264Siveresov product(ccstr, NativeMemoryTracking, "off", \ 108013264Siveresov "Native memory tracking options") \ 108113264Siveresov \ 108213264Siveresov diagnostic(bool, PrintNMTStatistics, false, \ 108313264Siveresov "Print native memory tracking summary data if it is on") \ 108413264Siveresov \ 108513264Siveresov diagnostic(bool, LogCompilation, false, \ 108613264Siveresov "Log compilation activity in detail to LogFile") \ 108713264Siveresov \ 108813264Siveresov product(bool, PrintCompilation, false, \ 108913264Siveresov "Print compilations") \ 109013264Siveresov \ 109113264Siveresov diagnostic(bool, TraceNMethodInstalls, false, \ 109213264Siveresov "Trace nmethod installation") \ 109313264Siveresov \ 109413264Siveresov diagnostic(intx, ScavengeRootsInCode, 2, \ 109513264Siveresov "0: do not allow scavengable oops in the code cache; " \ 109613264Siveresov "1: allow scavenging from the code cache; " \ 109713264Siveresov "2: emit as many constants as the compiler can see") \ 109813264Siveresov range(0, 2) \ 109913264Siveresov \ 110013264Siveresov product(bool, AlwaysRestoreFPU, false, \ 110113264Siveresov "Restore the FPU control word after every JNI call (expensive)") \ 110213264Siveresov \ 110313264Siveresov diagnostic(bool, PrintCompilation2, false, \ 110413264Siveresov "Print additional statistics per compilation") \ 110513264Siveresov \ 110613264Siveresov diagnostic(bool, PrintAdapterHandlers, false, \ 110713264Siveresov "Print code generated for i2c/c2i adapters") \ 110812657Skvn \ 110912657Skvn diagnostic(bool, VerifyAdapterCalls, trueInDebug, \ 111012657Skvn "Verify that i2c/c2i adapters are called properly") \ 111112657Skvn \ 111212657Skvn develop(bool, VerifyAdapterSharing, false, \ 111312657Skvn "Verify that the code for shared adapters is the equivalent") \ 111412657Skvn \ 111512657Skvn diagnostic(bool, PrintAssembly, false, \ 111612657Skvn 
"Print assembly code (using external disassembler.so)") \ 111712657Skvn \ 111812657Skvn diagnostic(ccstr, PrintAssemblyOptions, NULL, \ 111912657Skvn "Print options string passed to disassembler.so") \ 112012657Skvn \ 112112657Skvn notproduct(bool, PrintNMethodStatistics, false, \ 112212657Skvn "Print a summary statistic for the generated nmethods") \ 112312657Skvn \ 112412657Skvn diagnostic(bool, PrintNMethods, false, \ 112512657Skvn "Print assembly code for nmethods when generated") \ 112612657Skvn \ 112712657Skvn diagnostic(intx, PrintNMethodsAtLevel, -1, \ 112812657Skvn "Only print code for nmethods at the given compilation level") \ 112912657Skvn \ 113012657Skvn diagnostic(bool, PrintNativeNMethods, false, \ 113112657Skvn "Print assembly code for native nmethods when generated") \ 113212657Skvn \ 113312657Skvn develop(bool, PrintDebugInfo, false, \ 113412657Skvn "Print debug information for all nmethods when generated") \ 113512657Skvn \ 113612657Skvn develop(bool, PrintRelocations, false, \ 113712657Skvn "Print relocation information for all nmethods when generated") \ 113812657Skvn \ 113912657Skvn develop(bool, PrintDependencies, false, \ 114012657Skvn "Print dependency information for all nmethods when generated") \ 114112657Skvn \ 114212657Skvn develop(bool, PrintExceptionHandlers, false, \ 114312657Skvn "Print exception handler tables for all nmethods when generated") \ 114412657Skvn \ 114512657Skvn develop(bool, StressCompiledExceptionHandlers, false, \ 114612657Skvn "Exercise compiled exception handlers") \ 114712657Skvn \ 114812657Skvn develop(bool, InterceptOSException, false, \ 114912657Skvn "Start debugger when an implicit OS (e.g. 
NULL) " \ 115012657Skvn "exception happens") \ 115112657Skvn \ 115212657Skvn product(bool, PrintCodeCache, false, \ 115312657Skvn "Print the code cache memory usage when exiting") \ 115412657Skvn \ 115512657Skvn develop(bool, PrintCodeCache2, false, \ 115612657Skvn "Print detailed usage information on the code cache when exiting")\ 115712657Skvn \ 115812657Skvn product(bool, PrintCodeCacheOnCompilation, false, \ 115912657Skvn "Print the code cache memory usage each time a method is " \ 116012657Skvn "compiled") \ 116112968Siveresov \ 116212968Siveresov diagnostic(bool, PrintStubCode, false, \ 116312968Siveresov "Print generated stub code") \ 116412657Skvn \ 116512657Skvn product(bool, StackTraceInThrowable, true, \ 116612657Skvn "Collect backtrace in throwable when exception happens") \ 116712657Skvn \ 116812657Skvn product(bool, OmitStackTraceInFastThrow, true, \ 116912657Skvn "Omit backtraces for some 'hot' exceptions in optimized code") \ 117012657Skvn \ 117112657Skvn product(bool, ProfilerPrintByteCodeStatistics, false, \ 117212657Skvn "Print bytecode statistics when dumping profiler output") \ 117312657Skvn \ 117412657Skvn product(bool, ProfilerRecordPC, false, \ 117512657Skvn "Collect ticks for each 16 byte interval of compiled code") \ 117612657Skvn \ 117712657Skvn product(bool, ProfileVM, false, \ 117812657Skvn "Profile ticks that fall within VM (either in the VM Thread " \ 117912657Skvn "or VM code called through stubs)") \ 118012657Skvn \ 118112968Siveresov product(bool, ProfileIntervals, false, \ 118212968Siveresov "Print profiles for each interval (see ProfileIntervalsTicks)") \ 118312968Siveresov \ 118412657Skvn notproduct(bool, ProfilerCheckIntervals, false, \ 118512968Siveresov "Collect and print information on spacing of profiler ticks") \ 118612968Siveresov \ 118712968Siveresov product(bool, PrintWarnings, true, \ 118812968Siveresov "Print JVM warnings to output stream") \ 118912968Siveresov \ 119012968Siveresov notproduct(uintx, 
WarnOnStalledSpinLock, 0, \ 119112968Siveresov "Print warnings for stalled SpinLocks") \ 119212968Siveresov \ 119312657Skvn product(bool, RegisterFinalizersAtInit, true, \ 119412657Skvn "Register finalizable objects at end of Object.<init> or " \ 119512657Skvn "after allocation") \ 119612657Skvn \ 119712657Skvn develop(bool, RegisterReferences, true, \ 119812657Skvn "Tell whether the VM should register soft/weak/final/phantom " \ 119912657Skvn "references") \ 120012657Skvn \ 120112657Skvn develop(bool, IgnoreRewrites, false, \ 120212657Skvn "Suppress rewrites of bytecodes in the oopmap generator. " \ 120312657Skvn "This is unsafe!") \ 120412657Skvn \ 120512657Skvn develop(bool, PrintCodeCacheExtension, false, \ 120612657Skvn "Print extension of code cache") \ 120712657Skvn \ 120812657Skvn develop(bool, UsePrivilegedStack, true, \ 120912657Skvn "Enable the security JVM functions") \ 121012657Skvn \ 121112657Skvn develop(bool, ProtectionDomainVerification, true, \ 121212657Skvn "Verify protection domain before resolution in system dictionary")\ 121312657Skvn \ 121412657Skvn product(bool, ClassUnloading, true, \ 121512657Skvn "Do unloading of classes") \ 121612657Skvn \ 121712657Skvn product(bool, ClassUnloadingWithConcurrentMark, true, \ 121812657Skvn "Do unloading of classes with a concurrent marking cycle") \ 121912657Skvn \ 122012657Skvn develop(bool, DisableStartThread, false, \ 122112657Skvn "Disable starting of additional Java threads " \ 122212657Skvn "(for debugging only)") \ 122312657Skvn \ 122412657Skvn develop(bool, MemProfiling, false, \ 122512657Skvn "Write memory usage profiling to log file") \ 122612657Skvn \ 122712657Skvn notproduct(bool, PrintSystemDictionaryAtExit, false, \ 122812657Skvn "Print the system dictionary at exit") \ 122912657Skvn \ 123012657Skvn experimental(intx, PredictedLoadedClassCount, 0, \ 123112657Skvn "Experimental: Tune loaded class cache starting size") \ 123212657Skvn \ 123312657Skvn diagnostic(bool, UnsyncloadClass, false, \ 
123412657Skvn "Unstable: VM calls loadClass unsynchronized. Custom " \ 123512657Skvn "class loader must call VM synchronized for findClass " \ 123612657Skvn "and defineClass.") \ 123712657Skvn \ 123812657Skvn product(bool, AlwaysLockClassLoader, false, \ 123912657Skvn "Require the VM to acquire the class loader lock before calling " \ 124012657Skvn "loadClass() even for class loaders registering " \ 124112657Skvn "as parallel capable") \ 124212657Skvn \ 124312657Skvn product(bool, AllowParallelDefineClass, false, \ 124412657Skvn "Allow parallel defineClass requests for class loaders " \ 124512657Skvn "registering as parallel capable") \ 124612657Skvn \ 124712657Skvn product(bool, MustCallLoadClassInternal, false, \ 124812657Skvn "Call loadClassInternal() rather than loadClass()") \ 124912657Skvn \ 125012657Skvn product_pd(bool, DontYieldALot, \ 125112657Skvn "Throw away obvious excess yield calls") \ 125212657Skvn \ 125312657Skvn product_pd(bool, ConvertSleepToYield, \ 125412657Skvn "Convert sleep(0) to thread yield " \ 125512657Skvn "(may be off for Solaris to improve GUI)") \ 125612657Skvn \ 125712657Skvn product(bool, ConvertYieldToSleep, false, \ 125812657Skvn "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \ 125912657Skvn "behavior") \ 126012657Skvn \ 126112657Skvn develop(bool, UseDetachedThreads, true, \ 126212657Skvn "Use detached threads that are recycled upon termination " \ 126312657Skvn "(for Solaris only)") \ 126412657Skvn \ 126512657Skvn product(bool, UseLWPSynchronization, true, \ 126612657Skvn "Use LWP-based instead of libthread-based synchronization " \ 126712657Skvn "(SPARC only)") \ 126812657Skvn \ 126912657Skvn experimental(ccstr, SyncKnobs, NULL, \ 127012657Skvn "(Unstable) Various monitor synchronization tunables") \ 127112657Skvn \ 127212657Skvn experimental(intx, EmitSync, 0, \ 127312657Skvn "(Unsafe, Unstable) " \ 127412657Skvn "Control emission of inline sync fast-path code") \ 127512657Skvn \ 127612657Skvn product(intx, 
MonitorBound, 0, "Bound Monitor population") \ 127712657Skvn range(0, max_jint) \ 127812657Skvn \ 127912657Skvn product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \ 128012657Skvn \ 128112657Skvn experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) " \ 128212657Skvn "Experimental Sync flags") \ 128312657Skvn \ 128412657Skvn experimental(intx, SyncVerbose, 0, "(Unstable)") \ 128512657Skvn \ 128612657Skvn diagnostic(bool, InlineNotify, true, "intrinsify subset of notify") \ 128712657Skvn \ 128812657Skvn experimental(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \ 128912657Skvn \ 129012657Skvn experimental(intx, hashCode, 5, \ 129112657Skvn "(Unstable) select hashCode generation algorithm") \ 129212657Skvn \ 129312657Skvn experimental(intx, WorkAroundNPTLTimedWaitHang, 0, \ 129412657Skvn "(Unstable, Linux-specific) " \ 129512657Skvn "avoid NPTL-FUTEX hang pthread_cond_timedwait") \ 129612657Skvn \ 129712657Skvn product(bool, FilterSpuriousWakeups, true, \ 129812657Skvn "When true prevents OS-level spurious, or premature, wakeups " \ 129912657Skvn "from Object.wait (Ignored for Windows)") \ 130012657Skvn \ 130112657Skvn experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \ 130212657Skvn \ 130312657Skvn experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \ 130412657Skvn \ 130512657Skvn experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ 130612657Skvn \ 130712657Skvn develop(bool, UsePthreads, false, \ 130812657Skvn "Use pthread-based instead of libthread-based synchronization " \ 130912657Skvn "(SPARC only)") \ 131012657Skvn \ 131112657Skvn product(bool, ReduceSignalUsage, false, \ 131212657Skvn "Reduce the use of OS signals in Java and/or the VM") \ 131312657Skvn \ 131412657Skvn develop_pd(bool, ShareVtableStubs, \ 131512657Skvn "Share vtable stubs (smaller code but worse branch prediction") \ 131612657Skvn \ 131712657Skvn develop(bool, LoadLineNumberTables, true, \ 131812657Skvn "Tell whether the class file parser loads line 
number tables") \ 131912657Skvn \ 132012657Skvn develop(bool, LoadLocalVariableTables, true, \ 132112657Skvn "Tell whether the class file parser loads local variable tables") \ 132212657Skvn \ 132312657Skvn develop(bool, LoadLocalVariableTypeTables, true, \ 132412657Skvn "Tell whether the class file parser loads local variable type" \ 132512657Skvn "tables") \ 132612657Skvn \ 132712657Skvn product(bool, AllowUserSignalHandlers, false, \ 132812657Skvn "Do not complain if the application installs signal handlers " \ 132912657Skvn "(Solaris & Linux only)") \ 133012657Skvn \ 133112657Skvn product(bool, UseSignalChaining, true, \ 133212657Skvn "Use signal-chaining to invoke signal handlers installed " \ 133312657Skvn "by the application (Solaris & Linux only)") \ 133412657Skvn \ 133512657Skvn product(bool, AllowJNIEnvProxy, false, \ 133612657Skvn "Allow JNIEnv proxies for jdbx") \ 133712657Skvn \ 133812657Skvn product(bool, RestoreMXCSROnJNICalls, false, \ 133912657Skvn "Restore MXCSR when returning from JNI calls") \ 134012657Skvn \ 134112657Skvn product(bool, CheckJNICalls, false, \ 134212657Skvn "Verify all arguments to JNI calls") \ 134312657Skvn \ 134412657Skvn product(bool, CheckEndorsedAndExtDirs, false, \ 134512657Skvn "Verify the endorsed and extension directories are not used") \ 134612657Skvn \ 134712657Skvn product(bool, UseFastJNIAccessors, true, \ 134812657Skvn "Use optimized versions of Get<Primitive>Field") \ 134912657Skvn \ 135012657Skvn product(intx, MaxJNILocalCapacity, 65536, \ 135112657Skvn "Maximum allowable local JNI handle capacity to " \ 135212657Skvn "EnsureLocalCapacity() and PushLocalFrame(), " \ 135312657Skvn "where <= 0 is unlimited, default: 65536") \ 135412657Skvn range(min_intx, max_intx) \ 135512657Skvn \ 135612657Skvn product(bool, EagerXrunInit, false, \ 135712657Skvn "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 135812657Skvn "but not all -Xrun libraries may support the state of the VM " \ 135912657Skvn "at this 
time") \ 136012657Skvn \ 136112657Skvn product(bool, PreserveAllAnnotations, false, \ 136212657Skvn "Preserve RuntimeInvisibleAnnotations as well " \ 136312657Skvn "as RuntimeVisibleAnnotations") \ 136412657Skvn \ 136512657Skvn develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 136612657Skvn "Number of OutOfMemoryErrors preallocated with backtrace") \ 136712657Skvn \ 136812657Skvn product(bool, UseXMMForArrayCopy, false, \ 136912657Skvn "Use SSE2 MOVQ instruction for Arraycopy") \ 137012657Skvn \ 137112657Skvn product(intx, FieldsAllocationStyle, 1, \ 137212657Skvn "0 - type based with oops first, " \ 137312657Skvn "1 - with oops last, " \ 137412657Skvn "2 - oops in super and sub classes are together") \ 137512657Skvn range(0, 2) \ 137612657Skvn \ 137712657Skvn product(bool, CompactFields, true, \ 137812657Skvn "Allocate nonstatic fields in gaps between previous fields") \ 137912657Skvn \ 138012657Skvn notproduct(bool, PrintFieldLayout, false, \ 138112657Skvn "Print field layout for each class") \ 138212657Skvn \ 138312657Skvn /* Need to limit the extent of the padding to reasonable size. */\ 138412657Skvn /* 8K is well beyond the reasonable HW cache line size, even with */\ 138512657Skvn /* aggressive prefetching, while still leaving the room for segregating */\ 138612657Skvn /* among the distinct pages. 
*/\ 138712657Skvn product(intx, ContendedPaddingWidth, 128, \ 138812657Skvn "How many bytes to pad the fields/classes marked @Contended with")\ 138912657Skvn range(0, 8192) \ 139012657Skvn constraint(ContendedPaddingWidthConstraintFunc,AfterErgo) \ 139112657Skvn \ 139212657Skvn product(bool, EnableContended, true, \ 139312657Skvn "Enable @Contended annotation support") \ 139412657Skvn \ 139512657Skvn product(bool, RestrictContended, true, \ 139612657Skvn "Restrict @Contended to trusted classes") \ 139712657Skvn \ 139812657Skvn product(bool, UseBiasedLocking, true, \ 139912657Skvn "Enable biased locking in JVM") \ 140012657Skvn \ 140112657Skvn product(intx, BiasedLockingStartupDelay, 4000, \ 140212657Skvn "Number of milliseconds to wait before enabling biased locking") \ 140312657Skvn range(0, (intx)(max_jint-(max_jint%PeriodicTask::interval_gran))) \ 140412657Skvn constraint(BiasedLockingStartupDelayFunc,AfterErgo) \ 140512657Skvn \ 140612657Skvn diagnostic(bool, PrintBiasedLockingStatistics, false, \ 140712657Skvn "Print statistics of biased locking in JVM") \ 140812657Skvn \ 140912657Skvn product(intx, BiasedLockingBulkRebiasThreshold, 20, \ 141012657Skvn "Threshold of number of revocations per type to try to " \ 141112657Skvn "rebias all objects in the heap of that type") \ 141212657Skvn range(0, max_intx) \ 141312657Skvn constraint(BiasedLockingBulkRebiasThresholdFunc,AfterErgo) \ 141412657Skvn \ 141512657Skvn product(intx, BiasedLockingBulkRevokeThreshold, 40, \ 141612657Skvn "Threshold of number of revocations per type to permanently " \ 141712657Skvn "revoke biases of all objects in the heap of that type") \ 141812657Skvn range(0, max_intx) \ 141912657Skvn constraint(BiasedLockingBulkRevokeThresholdFunc,AfterErgo) \ 142012657Skvn \ 142112657Skvn product(intx, BiasedLockingDecayTime, 25000, \ 142212657Skvn "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ 142312657Skvn "type after previous bulk rebias") \ 142412657Skvn range(500, max_intx) \ 
142512657Skvn constraint(BiasedLockingDecayTimeFunc,AfterErgo) \ 142612657Skvn \ 142712657Skvn /* tracing */ \ 142812657Skvn \ 142912657Skvn notproduct(bool, TraceRuntimeCalls, false, \ 143012657Skvn "Trace run-time calls") \ 143112657Skvn \ 143212657Skvn develop(bool, TraceJNICalls, false, \ 143312657Skvn "Trace JNI calls") \ 143412657Skvn \ 143512657Skvn develop(bool, StressRewriter, false, \ 143612657Skvn "Stress linktime bytecode rewriting") \ 143712657Skvn \ 143812657Skvn notproduct(bool, TraceJVMCalls, false, \ 143912657Skvn "Trace JVM calls") \ 144012657Skvn \ 144112657Skvn product(ccstr, TraceJVMTI, NULL, \ 144212657Skvn "Trace flags for JVMTI functions and events") \ 144312657Skvn \ 144412657Skvn /* This option can change an EMCP method into an obsolete method. */ \ 144512657Skvn /* This can affect tests that expect specific methods to be EMCP. */ \ 144612657Skvn /* This option should be used with caution. */ \ 144712657Skvn product(bool, StressLdcRewrite, false, \ 144812657Skvn "Force ldc -> ldc_w rewrite during RedefineClasses") \ 144912657Skvn \ 145012657Skvn product(uintx, TraceRedefineClasses, 0, \ 145112657Skvn "Trace level for JVMTI RedefineClasses") \ 145212657Skvn range(0, 0xFFFFFFFF) \ 145312657Skvn \ 145412657Skvn /* change to false by default sometime after Mustang */ \ 145512657Skvn product(bool, VerifyMergedCPBytecodes, true, \ 145612657Skvn "Verify bytecodes after RedefineClasses constant pool merging") \ 145712657Skvn \ 145812657Skvn develop(bool, TraceJNIHandleAllocation, false, \ 145912657Skvn "Trace allocation/deallocation of JNI handle blocks") \ 146012657Skvn \ 146112657Skvn develop(bool, TraceBytecodes, false, \ 146212657Skvn "Trace bytecode execution") \ 146312657Skvn \ 146412657Skvn develop(bool, TraceClassInitialization, false, \ 146512657Skvn "Trace class initialization") \ 146612657Skvn \ 146712657Skvn product(bool, TraceExceptions, false, \ 146812657Skvn "Trace exceptions") \ 146912657Skvn \ 147012657Skvn develop(bool, TraceICs,
false, \ 147112657Skvn "Trace inline cache changes") \ 147212657Skvn \ 147312657Skvn notproduct(bool, TraceInvocationCounterOverflow, false, \ 147412657Skvn "Trace method invocation counter overflow") \ 147512657Skvn \ 147612657Skvn develop(bool, TraceInlineCacheClearing, false, \ 147712657Skvn "Trace clearing of inline caches in nmethods") \ 147812657Skvn \ 147912657Skvn develop(bool, TraceDependencies, false, \ 148012657Skvn "Trace dependencies") \ 148112657Skvn \ 148212657Skvn develop(bool, VerifyDependencies, trueInDebug, \ 148312657Skvn "Exercise and verify the compilation dependency mechanism") \ 148412657Skvn \ 148512657Skvn develop(bool, TraceNewOopMapGeneration, false, \ 148612657Skvn "Trace OopMapGeneration") \ 148712657Skvn \ 148812657Skvn develop(bool, TraceNewOopMapGenerationDetailed, false, \ 148912657Skvn "Trace OopMapGeneration: print detailed cell states") \ 149012657Skvn \ 149112657Skvn develop(bool, TimeOopMap, false, \ 149212657Skvn "Time calls to GenerateOopMap::compute_map() in sum") \ 149312657Skvn \ 149412657Skvn develop(bool, TimeOopMap2, false, \ 149512657Skvn "Time calls to GenerateOopMap::compute_map() individually") \ 149612657Skvn \ 149712657Skvn develop(bool, TraceMonitorMismatch, false, \ 149812657Skvn "Trace monitor matching failures during OopMapGeneration") \ 149912657Skvn \ 150012657Skvn develop(bool, TraceOopMapRewrites, false, \ 150112657Skvn "Trace rewriting of method oops during oop map generation") \ 150212657Skvn \ 150312657Skvn develop(bool, TraceICBuffer, false, \ 150412657Skvn "Trace usage of IC buffer") \ 150512657Skvn \ 150612657Skvn develop(bool, TraceCompiledIC, false, \ 150712657Skvn "Trace changes of compiled IC") \ 150812657Skvn \ 150912657Skvn develop(bool, TraceStartupTime, false, \ 151012657Skvn "Trace setup time") \ 151112657Skvn \ 151212657Skvn develop(bool, TraceProtectionDomainVerification, false, \ 151312657Skvn "Trace protection domain verification") \ 151412657Skvn \ 151512657Skvn develop(bool, 
TraceClearedExceptions, false, \ 151612657Skvn "Print when an exception is forcibly cleared") \ 151712657Skvn \ 151812657Skvn product(bool, TraceClassResolution, false, \ 151912657Skvn "Trace all constant pool resolutions (for debugging)") \ 152012657Skvn \ 152112657Skvn product(bool, TraceBiasedLocking, false, \ 152212657Skvn "Trace biased locking in JVM") \ 152312657Skvn \ 152412657Skvn product(bool, TraceMonitorInflation, false, \ 152512657Skvn "Trace monitor inflation in JVM") \ 152612657Skvn \ 152712657Skvn /* gc */ \ 152812657Skvn \ 152912657Skvn product(bool, UseSerialGC, false, \ 153012657Skvn "Use the Serial garbage collector") \ 153112657Skvn \ 153212657Skvn product(bool, UseG1GC, false, \ 153312657Skvn "Use the Garbage-First garbage collector") \ 153412657Skvn \ 153512657Skvn product(bool, UseParallelGC, false, \ 153612657Skvn "Use the Parallel Scavenge garbage collector") \ 153712657Skvn \ 153812657Skvn product(bool, UseParallelOldGC, false, \ 153912657Skvn "Use the Parallel Old garbage collector") \ 154012657Skvn \ 154112657Skvn product(uintx, HeapMaximumCompactionInterval, 20, \ 154212657Skvn "How often should we maximally compact the heap (not allowing " \ 154312657Skvn "any dead space)") \ 154412657Skvn \ 154512657Skvn product(uintx, HeapFirstMaximumCompactionCount, 3, \ 154612657Skvn "The collection count for the first maximum compaction") \ 154712657Skvn \ 154812657Skvn product(bool, UseMaximumCompactionOnSystemGC, true, \ 154912657Skvn "Use maximum compaction in the Parallel Old garbage collector " \ 155012657Skvn "for a system GC") \ 155112657Skvn \ 155212657Skvn product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 155312657Skvn "The mean used by the parallel compact dead wood " \ 155412657Skvn "limiter (a number between 0-100)") \ 155512657Skvn range(0, 100) \ 155612657Skvn \ 155712657Skvn product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 155812657Skvn "The standard deviation used by the parallel compact dead wood " \ 155912657Skvn 
"limiter (a number between 0-100)") \ 156012657Skvn range(0, 100) \ 156112657Skvn \ 156212657Skvn product(uint, ParallelGCThreads, 0, \ 156312657Skvn "Number of parallel threads parallel gc will use") \ 156412657Skvn constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \ 156512657Skvn \ 156612657Skvn diagnostic(bool, UseSemaphoreGCThreadsSynchronization, true, \ 156712657Skvn "Use semaphore synchronization for the GC Threads, " \ 156812657Skvn "instead of synchronization based on mutexes") \ 156912657Skvn \ 157012657Skvn product(bool, UseDynamicNumberOfGCThreads, false, \ 157112657Skvn "Dynamically choose the number of parallel threads " \ 157212657Skvn "parallel gc will use") \ 157312657Skvn \ 157412657Skvn diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ 157512657Skvn "Force dynamic selection of the number of " \ 157612657Skvn "parallel threads parallel gc will use to aid debugging") \ 157712657Skvn \ 157812657Skvn product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \ 157912657Skvn "Size of heap (bytes) per GC thread used in calculating the " \ 158012657Skvn "number of GC threads") \ 158112657Skvn range((size_t)os::vm_page_size(), (size_t)max_uintx) \ 158212657Skvn \ 158312657Skvn product(bool, TraceDynamicGCThreads, false, \ 158412657Skvn "Trace the dynamic GC thread usage") \ 158512657Skvn \ 158612657Skvn product(uint, ConcGCThreads, 0, \ 158712657Skvn "Number of threads concurrent gc will use") \ 158812657Skvn constraint(ConcGCThreadsConstraintFunc,AfterErgo) \ 158912657Skvn \ 159012657Skvn product(uintx, GCTaskTimeStampEntries, 200, \ 159112657Skvn "Number of time stamp entries per gc worker thread") \ 159212657Skvn range(1, max_uintx) \ 159312657Skvn \ 159412657Skvn product(bool, AlwaysTenure, false, \ 159512657Skvn "Always tenure objects in eden (ParallelGC only)") \ 159612657Skvn \ 159712657Skvn product(bool, NeverTenure, false, \ 159812657Skvn "Never tenure objects in eden, may tenure on overflow " \ 159912657Skvn "(ParallelGC only)") \ 
160012657Skvn \ 160112657Skvn product(bool, ScavengeBeforeFullGC, true, \ 160212657Skvn "Scavenge youngest generation before each full GC.") \ 160312657Skvn \ 160412657Skvn product(bool, UseConcMarkSweepGC, false, \ 160512657Skvn "Use Concurrent Mark-Sweep GC in the old generation") \ 160612657Skvn \ 160712657Skvn product(bool, ExplicitGCInvokesConcurrent, false, \ 160812657Skvn "A System.gc() request invokes a concurrent collection; " \ 160912657Skvn "(effective only when using concurrent collectors)") \ 161012657Skvn \ 161112657Skvn product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ 161212657Skvn "A System.gc() request invokes a concurrent collection and " \ 161312657Skvn "also unloads classes during such a concurrent gc cycle " \ 161412657Skvn "(effective only when UseConcMarkSweepGC)") \ 161512657Skvn \ 161612657Skvn product(bool, GCLockerInvokesConcurrent, false, \ 161712657Skvn "The exit of a JNI critical section necessitating a scavenge, " \ 161812657Skvn "also kicks off a background concurrent collection") \ 161912657Skvn \ 162012657Skvn product(uintx, GCLockerEdenExpansionPercent, 5, \ 162112657Skvn "How much the GC can expand the eden by while the GC locker " \ 162212657Skvn "is active (as a percentage)") \ 162312657Skvn range(0, 100) \ 162412657Skvn \ 162512657Skvn diagnostic(uintx, GCLockerRetryAllocationCount, 2, \ 162612657Skvn "Number of times to retry allocations when " \ 162712657Skvn "blocked by the GC locker") \ 162812657Skvn \ 162912657Skvn product(bool, UseCMSBestFit, true, \ 163012657Skvn "Use CMS best fit allocation strategy") \ 163112657Skvn \ 163212657Skvn product(bool, UseParNewGC, false, \ 163312657Skvn "Use parallel threads in the new generation") \ 163412657Skvn \ 163512657Skvn product(bool, PrintTaskqueue, false, \ 163612657Skvn "Print taskqueue statistics for parallel collectors") \ 163712657Skvn \ 163812657Skvn product(bool, PrintTerminationStats, false, \ 163912657Skvn "Print termination statistics for parallel 
collectors") \ 164012657Skvn \ 164112657Skvn product(uintx, ParallelGCBufferWastePct, 10, \ 164212657Skvn "Wasted fraction of parallel allocation buffer") \ 164312657Skvn range(0, 100) \ 164412657Skvn \ 164512657Skvn product(uintx, TargetPLABWastePct, 10, \ 164612657Skvn "Target wasted space in last buffer as percent of overall " \ 164712657Skvn "allocation") \ 164812657Skvn range(1, 100) \ 164912657Skvn \ 165012657Skvn product(uintx, PLABWeight, 75, \ 165112657Skvn "Percentage (0-100) used to weight the current sample when " \ 165212657Skvn "computing exponentially decaying average for ResizePLAB") \ 165312657Skvn range(0, 100) \ 165412657Skvn \ 165512657Skvn product(bool, ResizePLAB, true, \ 165612657Skvn "Dynamically resize (survivor space) promotion LAB's") \ 165712657Skvn \ 165812657Skvn product(bool, PrintPLAB, false, \ 165912657Skvn "Print (survivor space) promotion LAB's sizing decisions") \ 166012657Skvn \ 166112657Skvn product(intx, ParGCArrayScanChunk, 50, \ 166212657Skvn "Scan a subset of object array and push remainder, if array is " \ 166312657Skvn "bigger than this") \ 166412968Siveresov range(1, max_intx) \ 166512657Skvn \ 166612657Skvn product(bool, ParGCUseLocalOverflow, false, \ 166712657Skvn "Instead of a global overflow list, use local overflow stacks") \ 166812657Skvn \ 166912657Skvn product(bool, ParGCTrimOverflow, true, \ 167012657Skvn "Eagerly trim the local overflow lists " \ 167112657Skvn "(when ParGCUseLocalOverflow)") \ 167212657Skvn \ 167312657Skvn notproduct(bool, ParGCWorkQueueOverflowALot, false, \ 167412657Skvn "Simulate work queue overflow in ParNew") \ 167512657Skvn \ 167612657Skvn notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ 167712657Skvn "An `interval' counter that determines how frequently " \ 167812657Skvn "we simulate overflow; a smaller number increases frequency") \ 167912657Skvn \ 168012657Skvn product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ 168112657Skvn "The desired number of objects to claim from 
the overflow list") \ 168212657Skvn \ 168312657Skvn diagnostic(uintx, ParGCStridesPerThread, 2, \ 168412657Skvn "The number of strides per worker thread that we divide up the " \ 168512657Skvn "card table scanning work into") \ 168612657Skvn range(1, max_uintx) \ 168712657Skvn constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo) \ 168812657Skvn \ 168912657Skvn diagnostic(intx, ParGCCardsPerStrideChunk, 256, \ 169012657Skvn "The number of cards in each chunk of the parallel chunks used " \ 169112657Skvn "during card table scanning") \ 169212968Siveresov range(1, max_intx) \ 169312657Skvn \ 169412657Skvn product(uintx, OldPLABWeight, 50, \ 169512657Skvn "Percentage (0-100) used to weight the current sample when " \ 169612657Skvn "computing exponentially decaying average for resizing " \ 169712657Skvn "OldPLABSize") \ 169812657Skvn range(0, 100) \ 169912657Skvn \ 170012657Skvn product(bool, ResizeOldPLAB, true, \ 170112657Skvn "Dynamically resize (old gen) promotion LAB's") \ 170212657Skvn \ 170312657Skvn product(bool, PrintOldPLAB, false, \ 170412657Skvn "Print (old gen) promotion LAB's sizing decisions") \ 170512657Skvn \ 170612657Skvn product(size_t, CMSOldPLABMax, 1024, \ 170712657Skvn "Maximum size of CMS gen promotion LAB caches per worker " \ 170812657Skvn "per block size") \ 170912657Skvn range(1, max_uintx) \ 171012657Skvn constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit) \ 171112657Skvn \ 171212657Skvn product(size_t, CMSOldPLABMin, 16, \ 171312657Skvn "Minimum size of CMS gen promotion LAB caches per worker " \ 171412657Skvn "per block size") \ 171512657Skvn range(1, max_uintx) \ 171612657Skvn constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit) \ 171712657Skvn \ 171812657Skvn product(uintx, CMSOldPLABNumRefills, 4, \ 171912657Skvn "Nominal number of refills of CMS gen promotion LAB cache " \ 172012657Skvn "per worker per block size") \ 172112657Skvn range(1, max_uintx) \ 172212657Skvn \ 172312657Skvn product(bool, CMSOldPLABResizeQuicker, 
false, \ 172412657Skvn "React on-the-fly during a scavenge to a sudden " \ 172512657Skvn "change in block demand rate") \ 172612657Skvn \ 172712657Skvn product(uintx, CMSOldPLABToleranceFactor, 4, \ 172812657Skvn "The tolerance of the phase-change detector for on-the-fly " \ 172912657Skvn "PLAB resizing during a scavenge") \ 173012657Skvn range(1, max_uintx) \ 173112657Skvn \ 173212657Skvn product(uintx, CMSOldPLABReactivityFactor, 2, \ 173312657Skvn "The gain in the feedback loop for on-the-fly PLAB resizing " \ 173412657Skvn "during a scavenge") \ 173512657Skvn \ 173612657Skvn product(bool, AlwaysPreTouch, false, \ 173712657Skvn "Force all freshly committed pages to be pre-touched") \ 173812657Skvn \ 173912657Skvn product_pd(size_t, CMSYoungGenPerWorker, \ 174012657Skvn "The maximum size of young gen chosen by default per GC worker " \ 174112657Skvn "thread available") \ 174212657Skvn range(1, max_uintx) \ 174312657Skvn \ 174412657Skvn product(uintx, CMSIncrementalSafetyFactor, 10, \ 174512657Skvn "Percentage (0-100) used to add conservatism when computing the " \ 174612657Skvn "duty cycle") \ 174712657Skvn range(0, 100) \ 174812657Skvn \ 174912657Skvn product(uintx, CMSExpAvgFactor, 50, \ 175012657Skvn "Percentage (0-100) used to weight the current sample when " \ 175112657Skvn "computing exponential averages for CMS statistics") \ 175212657Skvn range(0, 100) \ 175312657Skvn \ 175412657Skvn product(uintx, CMS_FLSWeight, 75, \ 175512657Skvn "Percentage (0-100) used to weight the current sample when " \ 175612657Skvn "computing exponentially decaying averages for CMS FLS " \ 175712657Skvn "statistics") \ 175812657Skvn range(0, 100) \ 175912657Skvn \ 176012657Skvn product(uintx, CMS_FLSPadding, 1, \ 176112657Skvn "The multiple of deviation from mean to use for buffering " \ 176212657Skvn "against volatility in free list demand") \ 176312657Skvn \ 176412968Siveresov product(uintx, FLSCoalescePolicy, 2, \ 176512657Skvn "CMS: aggressiveness level for coalescing, 
increasing " \ 176612657Skvn "from 0 to 4") \ 176712657Skvn range(0, 4) \ 176812657Skvn \ 176912657Skvn product(bool, FLSAlwaysCoalesceLarge, false, \ 177012657Skvn "CMS: larger free blocks are always available for coalescing") \ 177112657Skvn \ 177212657Skvn product(double, FLSLargestBlockCoalesceProximity, 0.99, \ 177312657Skvn "CMS: the smaller the percentage the greater the coalescing " \ 177412657Skvn "force") \ 177512657Skvn range(0.0, 1.0) \ 177612657Skvn \ 177712657Skvn product(double, CMSSmallCoalSurplusPercent, 1.05, \ 177812657Skvn "CMS: the factor by which to inflate estimated demand of small " \ 177912657Skvn "block sizes to prevent coalescing with an adjoining block") \ 178012657Skvn range(0.0, DBL_MAX) \ 178112657Skvn \ 178212657Skvn product(double, CMSLargeCoalSurplusPercent, 0.95, \ 178312657Skvn "CMS: the factor by which to inflate estimated demand of large " \ 178412657Skvn "block sizes to prevent coalescing with an adjoining block") \ 178512657Skvn range(0.0, DBL_MAX) \ 178612657Skvn \ 178712657Skvn product(double, CMSSmallSplitSurplusPercent, 1.10, \ 178812657Skvn "CMS: the factor by which to inflate estimated demand of small " \ 178912657Skvn "block sizes to prevent splitting to supply demand for smaller " \ 179012657Skvn "blocks") \ 179112657Skvn range(0.0, DBL_MAX) \ 179212657Skvn \ 179312657Skvn product(double, CMSLargeSplitSurplusPercent, 1.00, \ 179412657Skvn "CMS: the factor by which to inflate estimated demand of large " \ 179512657Skvn "block sizes to prevent splitting to supply demand for smaller " \ 179612657Skvn "blocks") \ 179712657Skvn range(0.0, DBL_MAX) \ 179812657Skvn \ 179912657Skvn product(bool, CMSExtrapolateSweep, false, \ 180012657Skvn "CMS: cushion for block demand during sweep") \ 180112657Skvn \ 180212657Skvn product(uintx, CMS_SweepWeight, 75, \ 180312657Skvn "Percentage (0-100) used to weight the current sample when " \ 180412657Skvn "computing exponentially decaying average for inter-sweep " \ 180512657Skvn 
"duration") \ 180612657Skvn range(0, 100) \ 180712657Skvn \ 180812657Skvn product(uintx, CMS_SweepPadding, 1, \ 180912657Skvn "The multiple of deviation from mean to use for buffering " \ 181012657Skvn "against volatility in inter-sweep duration") \ 181112657Skvn \ 181212657Skvn product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 181312657Skvn "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 181412657Skvn "duration exceeds this threshold in milliseconds") \ 181512657Skvn \ 181612657Skvn product(bool, CMSClassUnloadingEnabled, true, \ 181712657Skvn "Whether class unloading enabled when using CMS GC") \ 181812657Skvn \ 181912657Skvn product(uintx, CMSClassUnloadingMaxInterval, 0, \ 182012657Skvn "When CMS class unloading is enabled, the maximum CMS cycle " \ 182112657Skvn "count for which classes may not be unloaded") \ 182212657Skvn \ 182312657Skvn product(uintx, CMSIndexedFreeListReplenish, 4, \ 182412657Skvn "Replenish an indexed free list with this number of chunks") \ 182512657Skvn range(1, max_uintx) \ 182612657Skvn \ 182712657Skvn product(bool, CMSReplenishIntermediate, true, \ 182812657Skvn "Replenish all intermediate free-list caches") \ 182912657Skvn \ 183012657Skvn product(bool, CMSSplitIndexedFreeListBlocks, true, \ 183112657Skvn "When satisfying batched demand, split blocks from the " \ 183212657Skvn "IndexedFreeList whose size is a multiple of requested size") \ 183312657Skvn \ 183412657Skvn product(bool, CMSLoopWarn, false, \ 183512657Skvn "Warn in case of excessive CMS looping") \ 183612657Skvn \ 183712657Skvn /* where does the range max value of (max_jint - 1) come from? 
*/ \ 183812657Skvn product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ 183912657Skvn "Maximum size of marking stack") \ 184012657Skvn range(1, (max_jint - 1)) \ 184112657Skvn \ 184212657Skvn product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ 184312657Skvn "Size of marking stack") \ 184412657Skvn constraint(MarkStackSizeConstraintFunc,AfterErgo) \ 184512657Skvn \ 184612657Skvn notproduct(bool, CMSMarkStackOverflowALot, false, \ 184712657Skvn "Simulate frequent marking stack / work queue overflow") \ 184812657Skvn \ 184912657Skvn notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ 185012657Skvn "An \"interval\" counter that determines how frequently " \ 185112657Skvn "to simulate overflow; a smaller number increases frequency") \ 185212657Skvn \ 185312657Skvn product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 185412657Skvn "Maximum number of abortable preclean iterations, if > 0") \ 185512657Skvn \ 185612657Skvn product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 185712657Skvn "Maximum time in abortable preclean (in milliseconds)") \ 185812657Skvn range(0, max_intx) \ 185912657Skvn \ 186012657Skvn product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ 186112657Skvn "Nominal minimum work per abortable preclean iteration") \ 186212657Skvn \ 186312657Skvn manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ 186412657Skvn "Time that we sleep between iterations when not given " \ 186512657Skvn "enough work per iteration") \ 186612657Skvn range(0, max_intx) \ 186712657Skvn \ 186812657Skvn product(size_t, CMSRescanMultiple, 32, \ 186912657Skvn "Size (in cards) of CMS parallel rescan task") \ 187012657Skvn range(1, max_uintx) \ 187112657Skvn \ 187212657Skvn product(size_t, CMSConcMarkMultiple, 32, \ 187312657Skvn "Size (in cards) of CMS concurrent MT marking task") \ 187412657Skvn range(1, max_uintx) \ 187512657Skvn \ 187612657Skvn product(bool, CMSAbortSemantics, false, \ 187712657Skvn "Whether abort-on-overflow semantics is 
implemented") \ 187812657Skvn \ 187912657Skvn product(bool, CMSParallelInitialMarkEnabled, true, \ 188012657Skvn "Use the parallel initial mark.") \ 188112657Skvn \ 188212657Skvn product(bool, CMSParallelRemarkEnabled, true, \ 188312657Skvn "Whether parallel remark enabled (only if ParNewGC)") \ 188412657Skvn \ 188512657Skvn product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 188612657Skvn "Whether parallel remark of survivor space " \ 188712657Skvn "enabled (effective only if CMSParallelRemarkEnabled)") \ 188812657Skvn \ 188912657Skvn product(bool, CMSPLABRecordAlways, true, \ 189012657Skvn "Always record survivor space PLAB boundaries (effective only " \ 189112657Skvn "if CMSParallelSurvivorRemarkEnabled)") \ 189212657Skvn \ 189312657Skvn product(bool, CMSEdenChunksRecordAlways, true, \ 189412657Skvn "Always record eden chunks used for the parallel initial mark " \ 189512657Skvn "or remark of eden") \ 189612657Skvn \ 189712657Skvn product(bool, CMSPrintEdenSurvivorChunks, false, \ 189812657Skvn "Print the eden and the survivor chunks used for the parallel " \ 189912657Skvn "initial mark or remark of the eden/survivor spaces") \ 190012657Skvn \ 190112657Skvn product(bool, CMSConcurrentMTEnabled, true, \ 190212657Skvn "Whether multi-threaded concurrent work enabled " \ 190312657Skvn "(effective only if ParNewGC)") \ 190412657Skvn \ 190512657Skvn product(bool, CMSPrecleaningEnabled, true, \ 190612657Skvn "Whether concurrent precleaning enabled") \ 190712657Skvn \ 190812657Skvn product(uintx, CMSPrecleanIter, 3, \ 190912657Skvn "Maximum number of precleaning iteration passes") \ 191012657Skvn range(0, 9) \ 191112657Skvn \ 191212657Skvn product(uintx, CMSPrecleanDenominator, 3, \ 191312657Skvn "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 191412657Skvn "ratio") \ 191512657Skvn range(1, max_uintx) \ 191612657Skvn constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo) \ 191712657Skvn \ 191812657Skvn product(uintx, CMSPrecleanNumerator, 2, \ 
191912657Skvn "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 192012657Skvn "ratio") \ 192112657Skvn range(0, max_uintx-1) \ 192212657Skvn constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo) \ 192312657Skvn \ 192412657Skvn product(bool, CMSPrecleanRefLists1, true, \ 192512657Skvn "Preclean ref lists during (initial) preclean phase") \ 192612657Skvn \ 192712657Skvn product(bool, CMSPrecleanRefLists2, false, \ 192812657Skvn "Preclean ref lists during abortable preclean phase") \ 192912657Skvn \ 193012657Skvn product(bool, CMSPrecleanSurvivors1, false, \ 193112657Skvn "Preclean survivors during (initial) preclean phase") \ 193212657Skvn \ 193312657Skvn product(bool, CMSPrecleanSurvivors2, true, \ 193412657Skvn "Preclean survivors during abortable preclean phase") \ 193512657Skvn \ 193612657Skvn product(uintx, CMSPrecleanThreshold, 1000, \ 193712657Skvn "Do not iterate again if number of dirty cards is less than this")\ 193812657Skvn range(100, max_uintx) \ 193912657Skvn \ 194012657Skvn product(bool, CMSCleanOnEnter, true, \ 194112657Skvn "Clean-on-enter optimization for reducing number of dirty cards") \ 194212657Skvn \ 194312657Skvn product(uintx, CMSRemarkVerifyVariant, 1, \ 194412657Skvn "Choose variant (1,2) of verification following remark") \ 194512657Skvn range(1, 2) \ 194612657Skvn \ 194712657Skvn product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 194812657Skvn "If Eden size is below this, do not try to schedule remark") \ 194912657Skvn \ 195012657Skvn product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 195112657Skvn "The Eden occupancy percentage (0-100) at which " \ 195212657Skvn "to try and schedule remark pause") \ 195312657Skvn range(0, 100) \ 195412657Skvn \ 195512657Skvn product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 195612657Skvn "Start sampling eden top at least before young gen " \ 195712657Skvn "occupancy reaches 1/<ratio> of the size at which " \ 195812657Skvn "we plan to schedule remark") \ 195912657Skvn 
range(1, max_uintx) \ 196012657Skvn \ 196112657Skvn product(uintx, CMSSamplingGrain, 16*K, \ 196212657Skvn "The minimum distance between eden samples for CMS (see above)") \ 196312657Skvn range(1, max_uintx) \ 196412657Skvn \ 196512657Skvn product(bool, CMSScavengeBeforeRemark, false, \ 196612657Skvn "Attempt scavenge before the CMS remark step") \ 196712657Skvn \ 196812657Skvn develop(bool, CMSTraceSweeper, false, \ 196912657Skvn "Trace some actions of the CMS sweeper") \ 197012657Skvn \ 197112657Skvn product(uintx, CMSWorkQueueDrainThreshold, 10, \ 197212657Skvn "Don't drain below this size per parallel worker/thief") \ 197312657Skvn range(1, max_juint) \ 197412657Skvn constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo) \ 197512657Skvn \ 197612657Skvn manageable(intx, CMSWaitDuration, 2000, \ 197712657Skvn "Time in milliseconds that CMS thread waits for young GC") \ 197812657Skvn \ 197912968Siveresov develop(uintx, CMSCheckInterval, 1000, \ 198012657Skvn "Interval in milliseconds that CMS thread checks if it " \ 198112657Skvn "should start a collection cycle") \ 198212657Skvn \ 198312657Skvn product(bool, CMSYield, true, \ 198412657Skvn "Yield between steps of CMS") \ 198512657Skvn \ 198612657Skvn product(size_t, CMSBitMapYieldQuantum, 10*M, \ 198712657Skvn "Bitmap operations should process at most this many bits " \ 198812657Skvn "between yields") \ 198912657Skvn range(1, max_uintx) \ 199012657Skvn \ 199112657Skvn product(bool, CMSDumpAtPromotionFailure, false, \ 199212657Skvn "Dump useful information about the state of the CMS old " \ 199312657Skvn "generation upon a promotion failure") \ 199412657Skvn \ 199512657Skvn product(bool, CMSPrintChunksInDump, false, \ 199612657Skvn "In a dump enabled by CMSDumpAtPromotionFailure, include " \ 199712657Skvn "more detailed information about the free chunks") \ 199812657Skvn \ 199912657Skvn product(bool, CMSPrintObjectsInDump, false, \ 200012657Skvn "In a dump enabled by CMSDumpAtPromotionFailure, include " \ 
200112657Skvn "more detailed information about the allocated objects") \ 200212657Skvn \ 200312657Skvn diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 200412657Skvn "Verify that all references across the FLS boundary " \ 200512657Skvn "are to valid objects") \ 200612657Skvn \ 200712657Skvn diagnostic(bool, FLSVerifyLists, false, \ 200812657Skvn "Do lots of (expensive) FreeListSpace verification") \ 200912657Skvn \ 201012657Skvn diagnostic(bool, FLSVerifyIndexTable, false, \ 201112657Skvn "Do lots of (expensive) FLS index table verification") \ 201212657Skvn \ 201312657Skvn develop(bool, FLSVerifyDictionary, false, \ 201412657Skvn "Do lots of (expensive) FLS dictionary verification") \ 201512657Skvn \ 201612657Skvn develop(bool, VerifyBlockOffsetArray, false, \ 201712657Skvn "Do (expensive) block offset array verification") \ 201812657Skvn \ 201912657Skvn diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ 202012657Skvn "Maintain _unallocated_block in BlockOffsetArray " \ 202112657Skvn "(currently applicable only to CMS collector)") \ 202212657Skvn \ 202312657Skvn develop(bool, TraceCMSState, false, \ 202412657Skvn "Trace the state of the CMS collection") \ 202512657Skvn \ 202612657Skvn product(intx, RefDiscoveryPolicy, 0, \ 202712657Skvn "Select type of reference discovery policy: " \ 202812657Skvn "reference-based(0) or referent-based(1)") \ 202912657Skvn range(ReferenceProcessor::DiscoveryPolicyMin, \ 203012657Skvn ReferenceProcessor::DiscoveryPolicyMax) \ 203112657Skvn \ 203212657Skvn product(bool, ParallelRefProcEnabled, false, \ 203312657Skvn "Enable parallel reference processing whenever possible") \ 203412657Skvn \ 203512657Skvn product(bool, ParallelRefProcBalancingEnabled, true, \ 203612657Skvn "Enable balancing of reference processing queues") \ 203712657Skvn \ 203812657Skvn product(uintx, CMSTriggerRatio, 80, \ 203912657Skvn "Percentage of MinHeapFreeRatio in CMS generation that is " \ 204012657Skvn "allocated before a CMS collection 
cycle commences") \ 204112657Skvn range(0, 100) \ 204212657Skvn \ 204312657Skvn product(uintx, CMSBootstrapOccupancy, 50, \ 204412657Skvn "Percentage CMS generation occupancy at which to " \ 204512657Skvn "initiate CMS collection for bootstrapping collection stats") \ 204612657Skvn range(0, 100) \ 204712657Skvn \ 204812657Skvn product(intx, CMSInitiatingOccupancyFraction, -1, \ 204912657Skvn "Percentage CMS generation occupancy to start a CMS collection " \ 205012657Skvn "cycle. A negative value means that CMSTriggerRatio is used") \ 205112657Skvn range(min_intx, 100) \ 205212657Skvn \ 205312657Skvn product(uintx, InitiatingHeapOccupancyPercent, 45, \ 205412657Skvn "Percentage of the (entire) heap occupancy to start a " \ 205512657Skvn "concurrent GC cycle. It is used by GCs that trigger a " \ 205612657Skvn "concurrent GC cycle based on the occupancy of the entire heap, " \ 205712657Skvn "not just one of the generations (e.g., G1). A value of 0 " \ 205812657Skvn "denotes 'do constant GC cycles'.") \ 205912657Skvn range(0, 100) \ 206012657Skvn \ 206112657Skvn manageable(intx, CMSTriggerInterval, -1, \ 206212657Skvn "Commence a CMS collection cycle (at least) every so many " \ 206312657Skvn "milliseconds (0 permanently, -1 disabled)") \ 206412657Skvn range(-1, max_intx) \ 206512657Skvn \ 206612657Skvn product(bool, UseCMSInitiatingOccupancyOnly, false, \ 206712657Skvn "Only use occupancy as a criterion for starting a CMS collection")\ 206812657Skvn \ 206912657Skvn product(uintx, CMSIsTooFullPercentage, 98, \ 207012657Skvn "An absolute ceiling above which CMS will always consider the " \ 207112657Skvn "unloading of classes when class unloading is enabled") \ 207212657Skvn range(0, 100) \ 207312657Skvn \ 207412657Skvn develop(bool, CMSTestInFreeList, false, \ 207512657Skvn "Check if the coalesced range is already in the " \ 207612657Skvn "free lists as claimed") \ 207712657Skvn \ 207812657Skvn notproduct(bool, CMSVerifyReturnedBytes, false, \ 207912657Skvn "Check that 
all the garbage collected was returned to the " \ 208012657Skvn "free lists") \ 208112657Skvn \ 208212657Skvn notproduct(bool, ScavengeALot, false, \ 208312657Skvn "Force scavenge at every Nth exit from the runtime system " \ 208412657Skvn "(N=ScavengeALotInterval)") \ 208512657Skvn \ 208612657Skvn develop(bool, FullGCALot, false, \ 208712657Skvn "Force full gc at every Nth exit from the runtime system " \ 208812657Skvn "(N=FullGCALotInterval)") \ 208912657Skvn \ 209012657Skvn notproduct(bool, GCALotAtAllSafepoints, false, \ 209112657Skvn "Enforce ScavengeALot/GCALot at all potential safepoints") \ 209212657Skvn \ 209312657Skvn product(bool, PrintPromotionFailure, false, \ 209412657Skvn "Print additional diagnostic information following " \ 209512657Skvn "promotion failure") \ 209612657Skvn \ 209712657Skvn notproduct(bool, PromotionFailureALot, false, \ 209812657Skvn "Use promotion failure handling on every youngest generation " \ 209912657Skvn "collection") \ 210012657Skvn \ 210112657Skvn develop(uintx, PromotionFailureALotCount, 1000, \ 210212657Skvn "Number of promotion failures occurring at PLAB " \ 210312657Skvn "refill attempts (ParNew) or promotion attempts " \ 210412657Skvn "(other young collectors)") \ 210512657Skvn \ 210612657Skvn develop(uintx, PromotionFailureALotInterval, 5, \ 210712657Skvn "Total collections between promotion failures a lot") \ 210812657Skvn \ 210912657Skvn experimental(uintx, WorkStealingSleepMillis, 1, \ 211012657Skvn "Sleep time when sleep is used for yields") \ 211112657Skvn \ 211212657Skvn experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ 211312657Skvn "Number of yields before a sleep is done during work stealing") \ 211412657Skvn \ 211512657Skvn experimental(uintx, WorkStealingHardSpins, 4096, \ 211612657Skvn "Number of iterations in a spin loop between checks on " \ 211712657Skvn "time out of hard spin") \ 211812657Skvn \ 211912657Skvn experimental(uintx, WorkStealingSpinToYieldRatio, 10, \ 212012657Skvn "Ratio of hard 
spins to calls to yield") \ 212112657Skvn \ 212212657Skvn develop(uintx, ObjArrayMarkingStride, 512, \ 212312657Skvn "Number of object array elements to push onto the marking stack " \ 212412657Skvn "before pushing a continuation entry") \ 212512657Skvn \ 212612657Skvn develop(bool, MetadataAllocationFailALot, false, \ 212712968Siveresov "Fail metadata allocations at intervals controlled by " \ 212812657Skvn "MetadataAllocationFailALotInterval") \ 212912657Skvn \ 213012657Skvn develop(uintx, MetadataAllocationFailALotInterval, 1000, \ 213112657Skvn "Metadata allocation failure a lot interval") \ 213212657Skvn \ 213312657Skvn develop(bool, TraceMetadataChunkAllocation, false, \ 213412657Skvn "Trace chunk metadata allocations") \ 213512657Skvn \ 213612657Skvn product(bool, TraceMetadataHumongousAllocation, false, \ 213712657Skvn "Trace humongous metadata allocations") \ 213812657Skvn \ 213912657Skvn develop(bool, TraceMetavirtualspaceAllocation, false, \ 214012657Skvn "Trace virtual space metadata allocations") \ 214112657Skvn \ 214212657Skvn notproduct(bool, ExecuteInternalVMTests, false, \ 214312657Skvn "Enable execution of internal VM tests") \ 214412657Skvn \ 214512657Skvn notproduct(bool, VerboseInternalVMTests, false, \ 214612657Skvn "Turn on logging for internal VM tests.") \ 214712657Skvn \ 214812657Skvn product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 214912657Skvn \ 215012657Skvn product_pd(bool, ResizeTLAB, \ 215112657Skvn "Dynamically resize TLAB size for threads") \ 215212657Skvn \ 215312657Skvn product(bool, ZeroTLAB, false, \ 215412657Skvn "Zero out the newly created TLAB") \ 215512657Skvn \ 215612657Skvn product(bool, FastTLABRefill, true, \ 215712657Skvn "Use fast TLAB refill code") \ 215812657Skvn \ 215912657Skvn product(bool, PrintTLAB, false, \ 216012657Skvn "Print various TLAB related information") \ 216112657Skvn \ 216212657Skvn product(bool, TLABStats, true, \ 216312657Skvn "Provide more detailed and expensive TLAB statistics " 
\ 216412657Skvn "(with PrintTLAB)") \ 216512657Skvn \ 216612657Skvn product_pd(bool, NeverActAsServerClassMachine, \ 216712657Skvn "Never act like a server-class machine") \ 216812657Skvn \ 216912657Skvn product(bool, AlwaysActAsServerClassMachine, false, \ 217012657Skvn "Always act like a server-class machine") \ 217112657Skvn \ 217212657Skvn product_pd(uint64_t, MaxRAM, \ 217312657Skvn "Real memory size (in bytes) used to set maximum heap size") \ 217412657Skvn range(0, 0XFFFFFFFFFFFFFFFF) \ 217512657Skvn \ 217612657Skvn product(size_t, ErgoHeapSizeLimit, 0, \ 217712657Skvn "Maximum ergonomically set heap size (in bytes); zero means use " \ 217812657Skvn "MaxRAM / MaxRAMFraction") \ 217912657Skvn \ 218012657Skvn product(uintx, MaxRAMFraction, 4, \ 218112657Skvn "Maximum fraction (1/n) of real memory used for maximum heap " \ 218212657Skvn "size") \ 218312657Skvn range(1, max_uintx) \ 218412657Skvn \ 218512657Skvn product(uintx, MinRAMFraction, 2, \ 218612657Skvn "Minimum fraction (1/n) of real memory used for maximum heap " \ 218712657Skvn "size on systems with small physical memory size") \ 218812657Skvn range(1, max_uintx) \ 218912657Skvn \ 219012657Skvn product(uintx, InitialRAMFraction, 64, \ 219112657Skvn "Fraction (1/n) of real memory used for initial heap size") \ 219212657Skvn range(1, max_uintx) \ 219312657Skvn \ 219412657Skvn develop(uintx, MaxVirtMemFraction, 2, \ 219512657Skvn "Maximum fraction (1/n) of virtual memory used for ergonomically "\ 219612657Skvn "determining maximum heap size") \ 219712657Skvn \ 219812657Skvn product(bool, UseAutoGCSelectPolicy, false, \ 219912657Skvn "Use automatic collection selection policy") \ 220012657Skvn \ 220112657Skvn product(uintx, AutoGCSelectPauseMillis, 5000, \ 220212657Skvn "Automatic GC selection pause threshold in milliseconds") \ 220312657Skvn \ 220412657Skvn product(bool, UseAdaptiveSizePolicy, true, \ 220512657Skvn "Use adaptive generation sizing policies") \ 220612657Skvn \ 220712657Skvn product(bool, 
UsePSAdaptiveSurvivorSizePolicy, true, \ 220812657Skvn "Use adaptive survivor sizing policies") \ 220912657Skvn \ 221012657Skvn product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \ 221112657Skvn "Use adaptive young-old sizing policies at minor collections") \ 221212657Skvn \ 221312657Skvn product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \ 221412657Skvn "Use adaptive young-old sizing policies at major collections") \ 221512657Skvn \ 221612657Skvn product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 221712657Skvn "Include statistics from System.gc() for adaptive size policy") \ 221812657Skvn \ 221912657Skvn product(bool, UseAdaptiveGCBoundary, false, \ 222012657Skvn "Allow young-old boundary to move") \ 222112657Skvn \ 222212657Skvn develop(bool, TraceAdaptiveGCBoundary, false, \ 222312657Skvn "Trace young-old boundary moves") \ 222412657Skvn \ 222512657Skvn develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \ 222612657Skvn "Resize the virtual spaces of the young or old generations") \ 222712657Skvn range(-1, 1) \ 222812657Skvn \ 222912657Skvn product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 223012657Skvn "Policy for changing generation size for throughput goals") \ 223112657Skvn range(0, 1) \ 223212657Skvn \ 223312657Skvn product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 223412657Skvn "Number of steps where heuristics is used before data is used") \ 223512657Skvn \ 223612657Skvn develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \ 223712657Skvn "Number of collections before the adaptive sizing is started") \ 223812657Skvn \ 223912657Skvn product(uintx, AdaptiveSizePolicyOutputInterval, 0, \ 224012657Skvn "Collection interval for printing information; zero means never") \ 224112657Skvn \ 224212657Skvn product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \ 224312657Skvn "Use adaptive minimum footprint as a goal") \ 224412657Skvn \ 224512657Skvn product(uintx, AdaptiveSizePolicyWeight, 10, \ 
224612657Skvn "Weight given to exponential resizing, between 0 and 100") \ 224712657Skvn range(0, 100) \ 224812657Skvn \ 224912657Skvn product(uintx, AdaptiveTimeWeight, 25, \ 225012657Skvn "Weight given to time in adaptive policy, between 0 and 100") \ 225112657Skvn range(0, 100) \ 225212657Skvn \ 225312657Skvn product(uintx, PausePadding, 1, \ 225412657Skvn "How much buffer to keep for pause time") \ 225512657Skvn \ 225612657Skvn product(uintx, PromotedPadding, 3, \ 225712657Skvn "How much buffer to keep for promotion failure") \ 225812657Skvn \ 225912657Skvn product(uintx, SurvivorPadding, 3, \ 226012657Skvn "How much buffer to keep for survivor overflow") \ 226112657Skvn \ 226212657Skvn product(uintx, ThresholdTolerance, 10, \ 226312657Skvn "Allowed collection cost difference between generations") \ 226412657Skvn range(0, 100) \ 226512657Skvn \ 226612657Skvn product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \ 226712657Skvn "If collection costs are within margin, reduce both by full " \ 226812657Skvn "delta") \ 226912657Skvn \ 227012657Skvn product(uintx, YoungGenerationSizeIncrement, 20, \ 227112657Skvn "Adaptive size percentage change in young generation") \ 227212657Skvn range(0, 100) \ 227312657Skvn \ 227412657Skvn product(uintx, YoungGenerationSizeSupplement, 80, \ 227512657Skvn "Supplement to YoungedGenerationSizeIncrement used at startup") \ 227612657Skvn range(0, 100) \ 227712657Skvn \ 227812657Skvn product(uintx, YoungGenerationSizeSupplementDecay, 8, \ 227912657Skvn "Decay factor to YoungedGenerationSizeSupplement") \ 228012657Skvn range(1, max_uintx) \ 228112657Skvn \ 228212657Skvn product(uintx, TenuredGenerationSizeIncrement, 20, \ 228312657Skvn "Adaptive size percentage change in tenured generation") \ 228412657Skvn range(0, 100) \ 228512657Skvn \ 228612657Skvn product(uintx, TenuredGenerationSizeSupplement, 80, \ 228712657Skvn "Supplement to TenuredGenerationSizeIncrement used at startup") \ 228812657Skvn range(0, 100) \ 228912657Skvn \ 
229012657Skvn product(uintx, TenuredGenerationSizeSupplementDecay, 2, \ 229112657Skvn "Decay factor to TenuredGenerationSizeIncrement") \ 229212657Skvn range(1, max_uintx) \ 229312657Skvn \ 229412657Skvn product(uintx, MaxGCPauseMillis, max_uintx, \ 229512657Skvn "Adaptive size policy maximum GC pause time goal in millisecond, "\ 229612657Skvn "or (G1 Only) the maximum GC time per MMU time slice") \ 229712657Skvn range(1, max_uintx) \ 229812657Skvn constraint(MaxGCPauseMillisConstraintFunc,AfterMemoryInit) \ 229912657Skvn \ 230012657Skvn product(uintx, GCPauseIntervalMillis, 0, \ 230112657Skvn "Time slice for MMU specification") \ 230212657Skvn constraint(GCPauseIntervalMillisConstraintFunc,AfterMemoryInit) \ 230312657Skvn \ 230412657Skvn product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 230512657Skvn "Adaptive size policy maximum GC minor pause time goal " \ 230612657Skvn "in millisecond") \ 230712657Skvn \ 230812657Skvn product(uintx, GCTimeRatio, 99, \ 230912657Skvn "Adaptive size policy application time to GC time ratio") \ 231012657Skvn \ 231112657Skvn product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \ 231212657Skvn "Adaptive size scale down factor for shrinking") \ 231312657Skvn range(1, max_uintx) \ 231412657Skvn \ 231512657Skvn product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \ 231612657Skvn "Adaptive size decays the major cost for long major intervals") \ 231712657Skvn \ 231812657Skvn product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \ 231912657Skvn "Time scale over which major costs decay") \ 232012657Skvn \ 232112657Skvn product(uintx, MinSurvivorRatio, 3, \ 232212657Skvn "Minimum ratio of young generation/survivor space size") \ 232312657Skvn range(3, max_uintx) \ 232412657Skvn \ 232512657Skvn product(uintx, InitialSurvivorRatio, 8, \ 232612657Skvn "Initial ratio of young generation/survivor space size") \ 232712657Skvn \ 232812657Skvn product(size_t, BaseFootPrintEstimate, 256*M, \ 232912657Skvn "Estimate of footprint other than Java Heap") 
\ 233012657Skvn \ 233112657Skvn product(bool, UseGCOverheadLimit, true, \ 233212657Skvn "Use policy to limit of proportion of time spent in GC " \ 233312657Skvn "before an OutOfMemory error is thrown") \ 233412657Skvn \ 233512657Skvn product(uintx, GCTimeLimit, 98, \ 233612657Skvn "Limit of the proportion of time spent in GC before " \ 233712657Skvn "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ 233812657Skvn range(0, 100) \ 233912657Skvn \ 234012657Skvn product(uintx, GCHeapFreeLimit, 2, \ 234112657Skvn "Minimum percentage of free space after a full GC before an " \ 234212657Skvn "OutOfMemoryError is thrown (used with GCTimeLimit)") \ 234312657Skvn range(0, 100) \ 234412657Skvn \ 234512657Skvn develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \ 234612657Skvn "Number of consecutive collections before gc time limit fires") \ 234712657Skvn range(1, max_uintx) \ 234812657Skvn \ 234912657Skvn product(bool, PrintAdaptiveSizePolicy, false, \ 235012657Skvn "Print information about AdaptiveSizePolicy") \ 235112657Skvn \ 235212657Skvn product(intx, PrefetchCopyIntervalInBytes, -1, \ 235312657Skvn "How far ahead to prefetch destination area (<= 0 means off)") \ 235412657Skvn \ 235512657Skvn product(intx, PrefetchScanIntervalInBytes, -1, \ 235612657Skvn "How far ahead to prefetch scan area (<= 0 means off)") \ 235712657Skvn \ 235812657Skvn product(intx, PrefetchFieldsAhead, -1, \ 235912657Skvn "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 236012657Skvn \ 236112657Skvn diagnostic(bool, VerifySilently, false, \ 236212657Skvn "Do not print the verification progress") \ 236312657Skvn \ 236412657Skvn diagnostic(bool, VerifyDuringStartup, false, \ 236512657Skvn "Verify memory system before executing any Java code " \ 236612657Skvn "during VM initialization") \ 236712657Skvn \ 236812657Skvn diagnostic(bool, VerifyBeforeExit, trueInDebug, \ 236912657Skvn "Verify system before exiting") \ 237012657Skvn \ 237112657Skvn diagnostic(bool, 
VerifyBeforeGC, false, \ 237212657Skvn "Verify memory system before GC") \ 237312657Skvn \ 237412657Skvn diagnostic(bool, VerifyAfterGC, false, \ 237512657Skvn "Verify memory system after GC") \ 237612657Skvn \ 237712657Skvn diagnostic(bool, VerifyDuringGC, false, \ 237812657Skvn "Verify memory system during GC (between phases)") \ 237912657Skvn \ 238012657Skvn diagnostic(bool, GCParallelVerificationEnabled, true, \ 238112657Skvn "Enable parallel memory system verification") \ 238212657Skvn \ 238312657Skvn diagnostic(bool, DeferInitialCardMark, false, \ 238412657Skvn "When +ReduceInitialCardMarks, explicitly defer any that " \ 238512657Skvn "may arise from new_pre_store_barrier") \ 238612657Skvn \ 238712657Skvn product(bool, UseCondCardMark, false, \ 238812657Skvn "Check for already marked card before updating card table") \ 238912657Skvn \ 239012657Skvn diagnostic(bool, VerifyRememberedSets, false, \ 239112657Skvn "Verify GC remembered sets") \ 239212657Skvn \ 239312657Skvn diagnostic(bool, VerifyObjectStartArray, true, \ 239412657Skvn "Verify GC object start array if verify before/after") \ 239512657Skvn \ 239612657Skvn product(bool, DisableExplicitGC, false, \ 239712657Skvn "Ignore calls to System.gc()") \ 239812657Skvn \ 239912657Skvn notproduct(bool, CheckMemoryInitialization, false, \ 240012657Skvn "Check memory initialization") \ 240112657Skvn \ 240212657Skvn diagnostic(bool, BindCMSThreadToCPU, false, \ 240312657Skvn "Bind CMS Thread to CPU if possible") \ 240412657Skvn \ 240512657Skvn diagnostic(uintx, CPUForCMSThread, 0, \ 240612657Skvn "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \ 240712657Skvn \ 240812657Skvn product(bool, BindGCTaskThreadsToCPUs, false, \ 240912657Skvn "Bind GCTaskThreads to CPUs if possible") \ 241012657Skvn \ 241112657Skvn product(bool, UseGCTaskAffinity, false, \ 241212657Skvn "Use worker affinity when asking for GCTasks") \ 241312657Skvn \ 241412657Skvn product(uintx, ProcessDistributionStride, 4, \ 
241512657Skvn "Stride through processors when distributing processes") \ 241612657Skvn \ 241712657Skvn product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 241812657Skvn "Number of times the coordinator GC thread will sleep while " \ 241912657Skvn "yielding before giving up and resuming GC") \ 242012657Skvn \ 242112657Skvn product(uintx, CMSYieldSleepCount, 0, \ 242212657Skvn "Number of times a GC thread (minus the coordinator) " \ 242312657Skvn "will sleep while yielding before giving up and resuming GC") \ 242412657Skvn \ 242512657Skvn /* gc tracing */ \ 242612657Skvn manageable(bool, PrintGC, false, \ 242712657Skvn "Print message at garbage collection") \ 242812657Skvn \ 242912657Skvn manageable(bool, PrintGCDetails, false, \ 243012657Skvn "Print more details at garbage collection") \ 243112657Skvn \ 243212657Skvn manageable(bool, PrintGCDateStamps, false, \ 243312657Skvn "Print date stamps at garbage collection") \ 243412657Skvn \ 243512657Skvn manageable(bool, PrintGCTimeStamps, false, \ 243612657Skvn "Print timestamps at garbage collection") \ 243712657Skvn \ 243812657Skvn manageable(bool, PrintGCID, true, \ 243912657Skvn "Print an identifier for each garbage collection") \ 244012657Skvn \ 244112657Skvn product(bool, PrintGCTaskTimeStamps, false, \ 244212657Skvn "Print timestamps for individual gc worker thread tasks") \ 244312657Skvn \ 244412657Skvn develop(intx, ConcGCYieldTimeout, 0, \ 244512657Skvn "If non-zero, assert that GC threads yield within this " \ 244612657Skvn "number of milliseconds") \ 244712657Skvn range(0, max_intx) \ 244812657Skvn \ 244912657Skvn product(bool, PrintReferenceGC, false, \ 245012657Skvn "Print times spent handling reference objects during GC " \ 245112657Skvn "(enabled only when PrintGCDetails)") \ 245212657Skvn \ 245312657Skvn develop(bool, TraceReferenceGC, false, \ 245412657Skvn "Trace handling of soft/weak/final/phantom references") \ 245512657Skvn \ 245612657Skvn develop(bool, TraceFinalizerRegistration, false, \ 
245712657Skvn "Trace registration of final references") \ 245812657Skvn \ 245912657Skvn notproduct(bool, TraceScavenge, false, \ 246012657Skvn "Trace scavenge") \ 246112657Skvn \ 246212657Skvn product(bool, IgnoreEmptyClassPaths, false, \ 246312657Skvn "Ignore empty path elements in -classpath") \ 246412657Skvn \ 246512657Skvn product(bool, TraceClassPaths, false, \ 246612657Skvn "Trace processing of class paths") \ 246712657Skvn \ 246812657Skvn product_rw(bool, TraceClassLoading, false, \ 246912657Skvn "Trace all classes loaded") \ 247012657Skvn \ 247112657Skvn product(bool, TraceClassLoadingPreorder, false, \ 247212657Skvn "Trace all classes loaded in order referenced (not loaded)") \ 247312657Skvn \ 247412657Skvn product_rw(bool, TraceClassUnloading, false, \ 247512657Skvn "Trace unloading of classes") \ 247612657Skvn \ 247712657Skvn product_rw(bool, TraceLoaderConstraints, false, \ 247812657Skvn "Trace loader constraints") \ 247912657Skvn \ 248012657Skvn develop(bool, TraceClassLoaderData, false, \ 248112657Skvn "Trace class loader loader_data lifetime") \ 248212657Skvn \ 248312657Skvn product(size_t, InitialBootClassLoaderMetaspaceSize, \ 248412657Skvn NOT_LP64(2200*K) LP64_ONLY(4*M), \ 248512657Skvn "Initial size of the boot class loader data metaspace") \ 248612657Skvn range(30*K, max_uintx/BytesPerWord) \ 248712657Skvn constraint(InitialBootClassLoaderMetaspaceSizeConstraintFunc, AfterErgo)\ 248812657Skvn \ 248912657Skvn product(bool, TraceYoungGenTime, false, \ 249012657Skvn "Trace accumulated time for young collection") \ 249112657Skvn \ 249212657Skvn product(bool, TraceOldGenTime, false, \ 249312657Skvn "Trace accumulated time for old collection") \ 249412657Skvn \ 249512657Skvn product(bool, PrintTenuringDistribution, false, \ 249612657Skvn "Print tenuring age information") \ 249712657Skvn \ 249812657Skvn product_rw(bool, PrintHeapAtGC, false, \ 249912657Skvn "Print heap layout before and after each GC") \ 250012657Skvn \ 250112657Skvn product_rw(bool, 
PrintHeapAtGCExtended, false, \ 250212657Skvn "Print extended information about the layout of the heap " \ 250312657Skvn "when -XX:+PrintHeapAtGC is set") \ 250412657Skvn \ 250512657Skvn product(bool, PrintHeapAtSIGBREAK, true, \ 250612657Skvn "Print heap layout in response to SIGBREAK") \ 250712657Skvn \ 250812657Skvn manageable(bool, PrintClassHistogramBeforeFullGC, false, \ 250912657Skvn "Print a class histogram before any major stop-world GC") \ 251012657Skvn \ 251112657Skvn manageable(bool, PrintClassHistogramAfterFullGC, false, \ 251212657Skvn "Print a class histogram after any major stop-world GC") \ 251312657Skvn \ 251412657Skvn manageable(bool, PrintClassHistogram, false, \ 251512657Skvn "Print a histogram of class instances") \ 251612657Skvn \ 251712657Skvn develop(bool, TraceWorkGang, false, \ 251812657Skvn "Trace activities of work gangs") \ 251912657Skvn \ 252012657Skvn develop(bool, TraceBlockOffsetTable, false, \ 252112657Skvn "Print BlockOffsetTable maps") \ 252212657Skvn \ 252312657Skvn develop(bool, TraceCardTableModRefBS, false, \ 252412657Skvn "Print CardTableModRefBS maps") \ 252512657Skvn \ 252612657Skvn develop(bool, TraceGCTaskManager, false, \ 252712657Skvn "Trace actions of the GC task manager") \ 252812657Skvn \ 252912657Skvn develop(bool, TraceGCTaskQueue, false, \ 253012657Skvn "Trace actions of the GC task queues") \ 253112657Skvn \ 253212657Skvn diagnostic(bool, TraceGCTaskThread, false, \ 253312657Skvn "Trace actions of the GC task threads") \ 253412657Skvn \ 253512657Skvn product(bool, PrintParallelOldGCPhaseTimes, false, \ 253612657Skvn "Print the time taken by each phase in ParallelOldGC " \ 253712657Skvn "(PrintGCDetails must also be enabled)") \ 253812657Skvn \ 253912657Skvn develop(bool, TraceParallelOldGCMarkingPhase, false, \ 254012657Skvn "Trace marking phase in ParallelOldGC") \ 254112657Skvn \ 254212657Skvn develop(bool, TraceParallelOldGCSummaryPhase, false, \ 254312657Skvn "Trace summary phase in ParallelOldGC") \ 
254412657Skvn \ 254512657Skvn develop(bool, TraceParallelOldGCCompactionPhase, false, \ 254612657Skvn "Trace compaction phase in ParallelOldGC") \ 254712657Skvn \ 254812657Skvn develop(bool, TraceParallelOldGCDensePrefix, false, \ 254912657Skvn "Trace dense prefix computation for ParallelOldGC") \ 255012657Skvn \ 255112657Skvn develop(bool, IgnoreLibthreadGPFault, false, \ 255212657Skvn "Suppress workaround for libthread GP fault") \ 255312657Skvn \ 255412657Skvn product(bool, PrintJNIGCStalls, false, \ 255512657Skvn "Print diagnostic message when GC is stalled " \ 255612657Skvn "by JNI critical section") \ 255712657Skvn \ 255812657Skvn experimental(double, ObjectCountCutOffPercent, 0.5, \ 255912657Skvn "The percentage of the used heap that the instances of a class " \ 256012657Skvn "must occupy for the class to generate a trace event") \ 256112657Skvn range(0.0, 100.0) \ 256212657Skvn \ 256312657Skvn /* GC log rotation setting */ \ 256412657Skvn \ 256512657Skvn product(bool, UseGCLogFileRotation, false, \ 256612657Skvn "Rotate gclog files (for long running applications). It requires "\ 256712657Skvn "-Xloggc:<filename>") \ 256812657Skvn \ 256912657Skvn product(uintx, NumberOfGCLogFiles, 0, \ 257012657Skvn "Number of gclog files in rotation " \ 257112657Skvn "(default: 0, no rotation)") \ 257212657Skvn \ 257312657Skvn product(size_t, GCLogFileSize, 8*K, \ 257412657Skvn "GC log file size, requires UseGCLogFileRotation. 
" \ 257512657Skvn "Set to 0 to only trigger rotation via jcmd") \ 257612657Skvn \ 257712657Skvn /* JVMTI heap profiling */ \ 257812657Skvn \ 257912657Skvn diagnostic(bool, TraceJVMTIObjectTagging, false, \ 258012657Skvn "Trace JVMTI object tagging calls") \ 258112657Skvn \ 258212657Skvn diagnostic(bool, VerifyBeforeIteration, false, \ 258312657Skvn "Verify memory system before JVMTI iteration") \ 258412657Skvn \ 258512657Skvn /* compiler interface */ \ 258612657Skvn \ 258712657Skvn develop(bool, CIPrintCompilerName, false, \ 258812657Skvn "when CIPrint is active, print the name of the active compiler") \ 258912657Skvn \ 259012657Skvn diagnostic(bool, CIPrintCompileQueue, false, \ 259112657Skvn "display the contents of the compile queue whenever a " \ 259212657Skvn "compilation is enqueued") \ 259312657Skvn \ 259412657Skvn develop(bool, CIPrintRequests, false, \ 259512657Skvn "display every request for compilation") \ 259612657Skvn \ 259712657Skvn product(bool, CITime, false, \ 259812657Skvn "collect timing information for compilation") \ 259912657Skvn \ 260012657Skvn develop(bool, CITimeVerbose, false, \ 260112657Skvn "be more verbose in compilation timings") \ 260212657Skvn \ 260312657Skvn develop(bool, CITimeEach, false, \ 260412657Skvn "display timing information after each successful compilation") \ 260512657Skvn \ 260612657Skvn develop(bool, CICountOSR, false, \ 260712657Skvn "use a separate counter when assigning ids to osr compilations") \ 260812657Skvn \ 260912657Skvn develop(bool, CICompileNatives, true, \ 261012657Skvn "compile native methods if supported by the compiler") \ 261112657Skvn \ 261212657Skvn develop_pd(bool, CICompileOSR, \ 261312657Skvn "compile on stack replacement methods if supported by the " \ 261412657Skvn "compiler") \ 261512657Skvn \ 261612657Skvn develop(bool, CIPrintMethodCodes, false, \ 261712657Skvn "print method bytecodes of the compiled code") \ 261812657Skvn \ 261912657Skvn develop(bool, CIPrintTypeFlow, false, \ 262012657Skvn 
"print the results of ciTypeFlow analysis") \ 262112657Skvn \ 262212657Skvn develop(bool, CITraceTypeFlow, false, \ 262312657Skvn "detailed per-bytecode tracing of ciTypeFlow analysis") \ 262412657Skvn \ 262512657Skvn develop(intx, OSROnlyBCI, -1, \ 262612657Skvn "OSR only at this bci. Negative values mean exclude that bci") \ 262712657Skvn \ 262812657Skvn /* compiler */ \ 262912657Skvn \ 263012657Skvn /* notice: the max range value here is max_jint, not max_intx */ \ 263112657Skvn /* because of overflow issue */ \ 263212657Skvn product(intx, CICompilerCount, CI_COMPILER_COUNT, \ 263312657Skvn "Number of compiler threads to run") \ 263412657Skvn range(0, max_jint) \ 263512657Skvn constraint(CICompilerCountConstraintFunc, AtParse) \ 263612657Skvn \ 263712657Skvn product(intx, CompilationPolicyChoice, 0, \ 263812657Skvn "which compilation policy (0-3)") \ 263912657Skvn range(0, 3) \ 264012657Skvn \ 264112657Skvn develop(bool, UseStackBanging, true, \ 264212657Skvn "use stack banging for stack overflow checks (required for " \ 264312657Skvn "proper StackOverflow handling; disable only to measure cost " \ 264412657Skvn "of stackbanging)") \ 264512657Skvn \ 264612657Skvn develop(bool, UseStrictFP, true, \ 264712657Skvn "use strict fp if modifier strictfp is set") \ 264812657Skvn \ 264912657Skvn develop(bool, GenerateSynchronizationCode, true, \ 265012657Skvn "generate locking/unlocking code for synchronized methods and " \ 265112657Skvn "monitors") \ 265212657Skvn \ 265312657Skvn develop(bool, GenerateCompilerNullChecks, true, \ 265412657Skvn "Generate explicit null checks for loads/stores/calls") \ 265512657Skvn \ 265612657Skvn develop(bool, GenerateRangeChecks, true, \ 265712657Skvn "Generate range checks for array accesses") \ 265812657Skvn \ 265912657Skvn develop_pd(bool, ImplicitNullChecks, \ 266012657Skvn "Generate code for implicit null checks") \ 266112657Skvn \ 266212657Skvn product_pd(bool, TrapBasedNullChecks, \ 266312657Skvn "Generate code for null checks 
that uses a cmp and trap " \ 266412657Skvn "instruction raising SIGTRAP. This is only used if an access to" \ 266512657Skvn "null (+offset) will not raise a SIGSEGV, i.e.," \ 266612657Skvn "ImplicitNullChecks don't work (PPC64).") \ 266712657Skvn \ 266812657Skvn product(bool, PrintSafepointStatistics, false, \ 266912657Skvn "Print statistics about safepoint synchronization") \ 267012657Skvn \ 267112657Skvn product(intx, PrintSafepointStatisticsCount, 300, \ 267212657Skvn "Total number of safepoint statistics collected " \ 267312657Skvn "before printing them out") \ 267412657Skvn range(1, max_intx) \ 267512657Skvn \ 267612657Skvn product(intx, PrintSafepointStatisticsTimeout, -1, \ 267712657Skvn "Print safepoint statistics only when safepoint takes " \ 267812657Skvn "more than PrintSafepointSatisticsTimeout in millis") \ 267912657Skvn LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ 268012657Skvn NOT_LP64(range(-1, max_intx)) \ 268112657Skvn \ 268212657Skvn product(bool, TraceSafepointCleanupTime, false, \ 268312657Skvn "Print the break down of clean up tasks performed during " \ 268412657Skvn "safepoint") \ 268512657Skvn \ 268612657Skvn product(bool, Inline, true, \ 268712657Skvn "Enable inlining") \ 268812657Skvn \ 268912657Skvn product(bool, ClipInlining, true, \ 2690 "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ 2691 \ 2692 develop(bool, UseCHA, true, \ 2693 "Enable CHA") \ 2694 \ 2695 product(bool, UseTypeProfile, true, \ 2696 "Check interpreter profile for historically monomorphic calls") \ 2697 \ 2698 diagnostic(bool, PrintInlining, false, \ 2699 "Print inlining optimizations") \ 2700 \ 2701 product(bool, UsePopCountInstruction, false, \ 2702 "Use population count instruction") \ 2703 \ 2704 develop(bool, EagerInitialization, false, \ 2705 "Eagerly initialize classes if possible") \ 2706 \ 2707 diagnostic(bool, LogTouchedMethods, false, \ 2708 "Log methods which have been ever touched in runtime") \ 2709 \ 2710 diagnostic(bool, 
PrintTouchedMethodsAtExit, false, \ 2711 "Print all methods that have been ever touched in runtime") \ 2712 \ 2713 develop(bool, TraceMethodReplacement, false, \ 2714 "Print when methods are replaced do to recompilation") \ 2715 \ 2716 develop(bool, PrintMethodFlushing, false, \ 2717 "Print the nmethods being flushed") \ 2718 \ 2719 diagnostic(bool, PrintMethodFlushingStatistics, false, \ 2720 "print statistics about method flushing") \ 2721 \ 2722 diagnostic(intx, HotMethodDetectionLimit, 100000, \ 2723 "Number of compiled code invocations after which " \ 2724 "the method is considered as hot by the flusher") \ 2725 range(1, max_jint) \ 2726 \ 2727 diagnostic(intx, MinPassesBeforeFlush, 10, \ 2728 "Minimum number of sweeper passes before an nmethod " \ 2729 "can be flushed") \ 2730 range(0, max_intx) \ 2731 \ 2732 product(bool, UseCodeAging, true, \ 2733 "Insert counter to detect warm methods") \ 2734 \ 2735 diagnostic(bool, StressCodeAging, false, \ 2736 "Start with counters compiled in") \ 2737 \ 2738 develop(bool, UseRelocIndex, false, \ 2739 "Use an index to speed random access to relocations") \ 2740 \ 2741 develop(bool, StressCodeBuffers, false, \ 2742 "Exercise code buffer expansion and other rare state changes") \ 2743 \ 2744 diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 2745 "Generate extra debugging information for non-safepoints in " \ 2746 "nmethods") \ 2747 \ 2748 product(bool, PrintVMOptions, false, \ 2749 "Print flags that appeared on the command line") \ 2750 \ 2751 product(bool, IgnoreUnrecognizedVMOptions, false, \ 2752 "Ignore unrecognized VM options") \ 2753 \ 2754 product(bool, PrintCommandLineFlags, false, \ 2755 "Print flags specified on command line or set by ergonomics") \ 2756 \ 2757 product(bool, PrintFlagsInitial, false, \ 2758 "Print all VM flags before argument processing and exit VM") \ 2759 \ 2760 product(bool, PrintFlagsFinal, false, \ 2761 "Print all VM flags after argument and ergonomic processing") \ 2762 \ 2763 
notproduct(bool, PrintFlagsWithComments, false, \ 2764 "Print all VM flags with default values and descriptions and " \ 2765 "exit") \ 2766 \ 2767 product(bool, PrintFlagsRanges, false, \ 2768 "Print VM flags and their ranges and exit VM") \ 2769 \ 2770 diagnostic(bool, SerializeVMOutput, true, \ 2771 "Use a mutex to serialize output to tty and LogFile") \ 2772 \ 2773 diagnostic(bool, DisplayVMOutput, true, \ 2774 "Display all VM output on the tty, independently of LogVMOutput") \ 2775 \ 2776 diagnostic(bool, LogVMOutput, false, \ 2777 "Save VM output to LogFile") \ 2778 \ 2779 diagnostic(ccstr, LogFile, NULL, \ 2780 "If LogVMOutput or LogCompilation is on, save VM output to " \ 2781 "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ 2782 \ 2783 product(ccstr, ErrorFile, NULL, \ 2784 "If an error occurs, save the error data to this file " \ 2785 "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 2786 \ 2787 product(bool, DisplayVMOutputToStderr, false, \ 2788 "If DisplayVMOutput is true, display all VM output to stderr") \ 2789 \ 2790 product(bool, DisplayVMOutputToStdout, false, \ 2791 "If DisplayVMOutput is true, display all VM output to stdout") \ 2792 \ 2793 product(bool, UseHeavyMonitors, false, \ 2794 "use heavyweight instead of lightweight Java monitors") \ 2795 \ 2796 product(bool, PrintStringTableStatistics, false, \ 2797 "print statistics about the StringTable and SymbolTable") \ 2798 \ 2799 diagnostic(bool, VerifyStringTableAtExit, false, \ 2800 "verify StringTable contents at exit") \ 2801 \ 2802 notproduct(bool, PrintSymbolTableSizeHistogram, false, \ 2803 "print histogram of the symbol table") \ 2804 \ 2805 notproduct(bool, ExitVMOnVerifyError, false, \ 2806 "standard exit from VM if bytecode verify error " \ 2807 "(only in debug mode)") \ 2808 \ 2809 diagnostic(ccstr, AbortVMOnException, NULL, \ 2810 "Call fatal if this exception is thrown. 
Example: " \ 2811 "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ 2812 \ 2813 diagnostic(ccstr, AbortVMOnExceptionMessage, NULL, \ 2814 "Call fatal if the exception pointed by AbortVMOnException " \ 2815 "has this message") \ 2816 \ 2817 develop(bool, DebugVtables, false, \ 2818 "add debugging code to vtable dispatch") \ 2819 \ 2820 develop(bool, PrintVtables, false, \ 2821 "print vtables when printing klass") \ 2822 \ 2823 notproduct(bool, PrintVtableStats, false, \ 2824 "print vtables stats at end of run") \ 2825 \ 2826 develop(bool, TraceCreateZombies, false, \ 2827 "trace creation of zombie nmethods") \ 2828 \ 2829 notproduct(bool, IgnoreLockingAssertions, false, \ 2830 "disable locking assertions (for speed)") \ 2831 \ 2832 product(bool, RangeCheckElimination, true, \ 2833 "Eliminate range checks") \ 2834 \ 2835 develop_pd(bool, UncommonNullCast, \ 2836 "track occurrences of null in casts; adjust compiler tactics") \ 2837 \ 2838 develop(bool, TypeProfileCasts, true, \ 2839 "treat casts like calls for purposes of type profiling") \ 2840 \ 2841 develop(bool, DelayCompilationDuringStartup, true, \ 2842 "Delay invoking the compiler until main application class is " \ 2843 "loaded") \ 2844 \ 2845 develop(bool, CompileTheWorld, false, \ 2846 "Compile all methods in all classes in bootstrap class path " \ 2847 "(stress test)") \ 2848 \ 2849 develop(bool, CompileTheWorldPreloadClasses, true, \ 2850 "Preload all classes used by a class before start loading") \ 2851 \ 2852 notproduct(intx, CompileTheWorldSafepointInterval, 100, \ 2853 "Force a safepoint every n compiles so sweeper can keep up") \ 2854 \ 2855 develop(bool, FillDelaySlots, true, \ 2856 "Fill delay slots (on SPARC only)") \ 2857 \ 2858 develop(bool, TimeLivenessAnalysis, false, \ 2859 "Time computation of bytecode liveness analysis") \ 2860 \ 2861 develop(bool, TraceLivenessGen, false, \ 2862 "Trace the generation of liveness analysis information") \ 2863 \ 2864 notproduct(bool, 
TraceLivenessQuery, false, \ 2865 "Trace queries of liveness analysis information") \ 2866 \ 2867 notproduct(bool, CollectIndexSetStatistics, false, \ 2868 "Collect information about IndexSets") \ 2869 \ 2870 develop(bool, UseLoopSafepoints, true, \ 2871 "Generate Safepoint nodes in every loop") \ 2872 \ 2873 develop(intx, FastAllocateSizeLimit, 128*K, \ 2874 /* Note: This value is zero mod 1<<13 for a cheap sparc set. */ \ 2875 "Inline allocations larger than this in doublewords must go slow")\ 2876 \ 2877 product(bool, AggressiveOpts, false, \ 2878 "Enable aggressive optimizations - see arguments.cpp") \ 2879 \ 2880 product_pd(bool, CompactStrings, \ 2881 "Enable Strings to use single byte chars in backing store") \ 2882 \ 2883 product_pd(uintx, TypeProfileLevel, \ 2884 "=XYZ, with Z: Type profiling of arguments at call; " \ 2885 "Y: Type profiling of return value at call; " \ 2886 "X: Type profiling of parameters to methods; " \ 2887 "X, Y and Z in 0=off ; 1=jsr292 only; 2=all methods") \ 2888 constraint(TypeProfileLevelConstraintFunc, AfterErgo) \ 2889 \ 2890 product(intx, TypeProfileArgsLimit, 2, \ 2891 "max number of call arguments to consider for type profiling") \ 2892 range(0, 16) \ 2893 \ 2894 product(intx, TypeProfileParmsLimit, 2, \ 2895 "max number of incoming parameters to consider for type profiling"\ 2896 ", -1 for all") \ 2897 range(-1, 64) \ 2898 \ 2899 /* statistics */ \ 2900 develop(bool, CountCompiledCalls, false, \ 2901 "Count method invocations") \ 2902 \ 2903 notproduct(bool, CountRuntimeCalls, false, \ 2904 "Count VM runtime calls") \ 2905 \ 2906 develop(bool, CountJNICalls, false, \ 2907 "Count jni method invocations") \ 2908 \ 2909 notproduct(bool, CountJVMCalls, false, \ 2910 "Count jvm method invocations") \ 2911 \ 2912 notproduct(bool, CountRemovableExceptions, false, \ 2913 "Count exceptions that could be replaced by branches due to " \ 2914 "inlining") \ 2915 \ 2916 notproduct(bool, ICMissHistogram, false, \ 2917 "Produce histogram 
of IC misses") \ 2918 \ 2919 /* interpreter */ \ 2920 develop(bool, ClearInterpreterLocals, false, \ 2921 "Always clear local variables of interpreter activations upon " \ 2922 "entry") \ 2923 \ 2924 product_pd(bool, RewriteBytecodes, \ 2925 "Allow rewriting of bytecodes (bytecodes are not immutable)") \ 2926 \ 2927 product_pd(bool, RewriteFrequentPairs, \ 2928 "Rewrite frequently used bytecode pairs into a single bytecode") \ 2929 \ 2930 diagnostic(bool, PrintInterpreter, false, \ 2931 "Print the generated interpreter code") \ 2932 \ 2933 product(bool, UseInterpreter, true, \ 2934 "Use interpreter for non-compiled methods") \ 2935 \ 2936 develop(bool, UseFastSignatureHandlers, true, \ 2937 "Use fast signature handlers for native calls") \ 2938 \ 2939 product(bool, UseLoopCounter, true, \ 2940 "Increment invocation counter on backward branch") \ 2941 \ 2942 product_pd(bool, UseOnStackReplacement, \ 2943 "Use on stack replacement, calls runtime if invoc. counter " \ 2944 "overflows in loop") \ 2945 \ 2946 notproduct(bool, TraceOnStackReplacement, false, \ 2947 "Trace on stack replacement") \ 2948 \ 2949 product_pd(bool, PreferInterpreterNativeStubs, \ 2950 "Use always interpreter stubs for native methods invoked via " \ 2951 "interpreter") \ 2952 \ 2953 develop(bool, CountBytecodes, false, \ 2954 "Count number of bytecodes executed") \ 2955 \ 2956 develop(bool, PrintBytecodeHistogram, false, \ 2957 "Print histogram of the executed bytecodes") \ 2958 \ 2959 develop(bool, PrintBytecodePairHistogram, false, \ 2960 "Print histogram of the executed bytecode pairs") \ 2961 \ 2962 diagnostic(bool, PrintSignatureHandlers, false, \ 2963 "Print code generated for native method signature handlers") \ 2964 \ 2965 develop(bool, VerifyOops, false, \ 2966 "Do plausibility checks for oops") \ 2967 \ 2968 develop(bool, CheckUnhandledOops, false, \ 2969 "Check for unhandled oops in VM code") \ 2970 \ 2971 develop(bool, VerifyJNIFields, trueInDebug, \ 2972 "Verify jfieldIDs for 
instance fields") \ 2973 \ 2974 notproduct(bool, VerifyJNIEnvThread, false, \ 2975 "Verify JNIEnv.thread == Thread::current() when entering VM " \ 2976 "from JNI") \ 2977 \ 2978 develop(bool, VerifyFPU, false, \ 2979 "Verify FPU state (check for NaN's, etc.)") \ 2980 \ 2981 develop(bool, VerifyThread, false, \ 2982 "Watch the thread register for corruption (SPARC only)") \ 2983 \ 2984 develop(bool, VerifyActivationFrameSize, false, \ 2985 "Verify that activation frame didn't become smaller than its " \ 2986 "minimal size") \ 2987 \ 2988 develop(bool, TraceFrequencyInlining, false, \ 2989 "Trace frequency based inlining") \ 2990 \ 2991 develop_pd(bool, InlineIntrinsics, \ 2992 "Inline intrinsics that can be statically resolved") \ 2993 \ 2994 product_pd(bool, ProfileInterpreter, \ 2995 "Profile at the bytecode level during interpretation") \ 2996 \ 2997 develop(bool, TraceProfileInterpreter, false, \ 2998 "Trace profiling at the bytecode level during interpretation. " \ 2999 "This outputs the profiling information collected to improve " \ 3000 "jit compilation.") \ 3001 \ 3002 develop_pd(bool, ProfileTraps, \ 3003 "Profile deoptimization traps at the bytecode level") \ 3004 \ 3005 product(intx, ProfileMaturityPercentage, 20, \ 3006 "number of method invocations/branches (expressed as % of " \ 3007 "CompileThreshold) before using the method's profile") \ 3008 range(0, 100) \ 3009 \ 3010 diagnostic(bool, PrintMethodData, false, \ 3011 "Print the results of +ProfileInterpreter at end of run") \ 3012 \ 3013 develop(bool, VerifyDataPointer, trueInDebug, \ 3014 "Verify the method data pointer during interpreter profiling") \ 3015 \ 3016 develop(bool, VerifyCompiledCode, false, \ 3017 "Include miscellaneous runtime verifications in nmethod code; " \ 3018 "default off because it disturbs nmethod size heuristics") \ 3019 \ 3020 notproduct(bool, CrashGCForDumpingJavaThread, false, \ 3021 "Manually make GC thread crash then dump java stack trace; " \ 3022 "Test only") \ 3023 \ 
3024 /* compilation */ \ 3025 product(bool, UseCompiler, true, \ 3026 "Use Just-In-Time compilation") \ 3027 \ 3028 develop(bool, TraceCompilationPolicy, false, \ 3029 "Trace compilation policy") \ 3030 \ 3031 develop(bool, TimeCompilationPolicy, false, \ 3032 "Time the compilation policy") \ 3033 \ 3034 product(bool, UseCounterDecay, true, \ 3035 "Adjust recompilation counters") \ 3036 \ 3037 develop(intx, CounterHalfLifeTime, 30, \ 3038 "Half-life time of invocation counters (in seconds)") \ 3039 \ 3040 develop(intx, CounterDecayMinIntervalLength, 500, \ 3041 "The minimum interval (in milliseconds) between invocation of " \ 3042 "CounterDecay") \ 3043 \ 3044 product(bool, AlwaysCompileLoopMethods, false, \ 3045 "When using recompilation, never interpret methods " \ 3046 "containing loops") \ 3047 \ 3048 product(bool, DontCompileHugeMethods, true, \ 3049 "Do not compile methods > HugeMethodLimit") \ 3050 \ 3051 /* Bytecode escape analysis estimation. */ \ 3052 product(bool, EstimateArgEscape, true, \ 3053 "Analyze bytecodes to estimate escape state of arguments") \ 3054 \ 3055 product(intx, BCEATraceLevel, 0, \ 3056 "How much tracing to do of bytecode escape analysis estimates " \ 3057 "(0-3)") \ 3058 range(0, 3) \ 3059 \ 3060 product(intx, MaxBCEAEstimateLevel, 5, \ 3061 "Maximum number of nested calls that are analyzed by BC EA") \ 3062 range(0, max_jint) \ 3063 \ 3064 product(intx, MaxBCEAEstimateSize, 150, \ 3065 "Maximum bytecode size of a method to be analyzed by BC EA") \ 3066 range(0, max_jint) \ 3067 \ 3068 product(intx, AllocatePrefetchStyle, 1, \ 3069 "0 = no prefetch, " \ 3070 "1 = prefetch instructions for each allocation, " \ 3071 "2 = use TLAB watermark to gate allocation prefetch, " \ 3072 "3 = use BIS instruction on Sparc for allocation prefetch") \ 3073 range(0, 3) \ 3074 \ 3075 product(intx, AllocatePrefetchDistance, -1, \ 3076 "Distance to prefetch ahead of allocation pointer. 
" \ 3077 "-1: use system-specific value (automatically determined") \ 3078 constraint(AllocatePrefetchDistanceConstraintFunc, AfterMemoryInit)\ 3079 \ 3080 product(intx, AllocatePrefetchLines, 3, \ 3081 "Number of lines to prefetch ahead of array allocation pointer") \ 3082 range(1, max_jint / 2) \ 3083 \ 3084 product(intx, AllocateInstancePrefetchLines, 1, \ 3085 "Number of lines to prefetch ahead of instance allocation " \ 3086 "pointer") \ 3087 range(1, max_jint / 2) \ 3088 \ 3089 product(intx, AllocatePrefetchStepSize, 16, \ 3090 "Step size in bytes of sequential prefetch instructions") \ 3091 range(1, max_jint) \ 3092 constraint(AllocatePrefetchStepSizeConstraintFunc,AfterMemoryInit)\ 3093 \ 3094 product(intx, AllocatePrefetchInstr, 0, \ 3095 "Prefetch instruction to prefetch ahead of allocation pointer") \ 3096 constraint(AllocatePrefetchInstrConstraintFunc, AfterErgo) \ 3097 \ 3098 /* deoptimization */ \ 3099 develop(bool, TraceDeoptimization, false, \ 3100 "Trace deoptimization") \ 3101 \ 3102 develop(bool, PrintDeoptimizationDetails, false, \ 3103 "Print more information about deoptimization") \ 3104 \ 3105 develop(bool, DebugDeoptimization, false, \ 3106 "Tracing various information while debugging deoptimization") \ 3107 \ 3108 product(intx, SelfDestructTimer, 0, \ 3109 "Will cause VM to terminate after a given time (in minutes) " \ 3110 "(0 means off)") \ 3111 range(0, max_intx) \ 3112 \ 3113 product(intx, MaxJavaStackTraceDepth, 1024, \ 3114 "The maximum number of lines in the stack trace for Java " \ 3115 "exceptions (0 means all)") \ 3116 range(0, max_jint/2) \ 3117 \ 3118 develop(bool, TraceStackWalk, false, \ 3119 "Trace stack walking") \ 3120 \ 3121 product(bool, MemberNameInStackFrame, true, \ 3122 "Use MemberName in StackFrame") \ 3123 \ 3124 /* notice: the max range value here is max_jint, not max_intx */ \ 3125 /* because of overflow issue */ \ 3126 NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ 3127 "Guarantee a safepoint 
(at least) every so many milliseconds " \ 3128 "(0 means none)")) \ 3129 NOT_EMBEDDED(range(0, max_jint)) \ 3130 \ 3131 EMBEDDED_ONLY(product(intx, GuaranteedSafepointInterval, 0, \ 3132 "Guarantee a safepoint (at least) every so many milliseconds " \ 3133 "(0 means none)")) \ 3134 EMBEDDED_ONLY(range(0, max_jint)) \ 3135 \ 3136 product(intx, SafepointTimeoutDelay, 10000, \ 3137 "Delay in milliseconds for option SafepointTimeout") \ 3138 LP64_ONLY(range(0, max_intx/MICROUNITS)) \ 3139 NOT_LP64(range(0, max_intx)) \ 3140 \ 3141 product(intx, NmethodSweepActivity, 10, \ 3142 "Removes cold nmethods from code cache if > 0. Higher values " \ 3143 "result in more aggressive sweeping") \ 3144 range(0, 2000) \ 3145 \ 3146 notproduct(bool, LogSweeper, false, \ 3147 "Keep a ring buffer of sweeper activity") \ 3148 \ 3149 notproduct(intx, SweeperLogEntries, 1024, \ 3150 "Number of records in the ring buffer of sweeper activity") \ 3151 \ 3152 notproduct(intx, MemProfilingInterval, 500, \ 3153 "Time between each invocation of the MemProfiler") \ 3154 \ 3155 develop(intx, MallocCatchPtr, -1, \ 3156 "Hit breakpoint when mallocing/freeing this pointer") \ 3157 \ 3158 notproduct(ccstrlist, SuppressErrorAt, "", \ 3159 "List of assertions (file:line) to muzzle") \ 3160 \ 3161 notproduct(size_t, HandleAllocationLimit, 1024, \ 3162 "Threshold for HandleMark allocation when +TraceHandleAllocation "\ 3163 "is used") \ 3164 \ 3165 develop(size_t, TotalHandleAllocationLimit, 1024, \ 3166 "Threshold for total handle allocation when " \ 3167 "+TraceHandleAllocation is used") \ 3168 \ 3169 develop(intx, StackPrintLimit, 100, \ 3170 "number of stack frames to print in VM-level stack dump") \ 3171 \ 3172 notproduct(intx, MaxElementPrintSize, 256, \ 3173 "maximum number of elements to print") \ 3174 \ 3175 notproduct(intx, MaxSubklassPrintSize, 4, \ 3176 "maximum number of subklasses to print when printing klass") \ 3177 \ 3178 product(intx, MaxInlineLevel, 9, \ 3179 "maximum number of nested 
calls that are inlined") \ 3180 range(0, max_jint) \ 3181 \ 3182 product(intx, MaxRecursiveInlineLevel, 1, \ 3183 "maximum number of nested recursive calls that are inlined") \ 3184 range(0, max_jint) \ 3185 \ 3186 develop(intx, MaxForceInlineLevel, 100, \ 3187 "maximum number of nested calls that are forced for inlining " \ 3188 "(using CompileCommand or marked w/ @ForceInline)") \ 3189 range(0, max_jint) \ 3190 \ 3191 product_pd(intx, InlineSmallCode, \ 3192 "Only inline already compiled methods if their code size is " \ 3193 "less than this") \ 3194 range(0, max_jint) \ 3195 \ 3196 product(intx, MaxInlineSize, 35, \ 3197 "The maximum bytecode size of a method to be inlined") \ 3198 range(0, max_jint) \ 3199 \ 3200 product_pd(intx, FreqInlineSize, \ 3201 "The maximum bytecode size of a frequent method to be inlined") \ 3202 range(0, max_jint) \ 3203 \ 3204 product(intx, MaxTrivialSize, 6, \ 3205 "The maximum bytecode size of a trivial method to be inlined") \ 3206 range(0, max_jint) \ 3207 \ 3208 product(intx, MinInliningThreshold, 250, \ 3209 "The minimum invocation count a method needs to have to be " \ 3210 "inlined") \ 3211 range(0, max_jint) \ 3212 \ 3213 develop(intx, MethodHistogramCutoff, 100, \ 3214 "The cutoff value for method invocation histogram (+CountCalls)") \ 3215 \ 3216 develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ 3217 "Number of interpreted methods to show in profile") \ 3218 \ 3219 develop(intx, ProfilerNumberOfCompiledMethods, 25, \ 3220 "Number of compiled methods to show in profile") \ 3221 \ 3222 develop(intx, ProfilerNumberOfStubMethods, 25, \ 3223 "Number of stub methods to show in profile") \ 3224 \ 3225 develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 3226 "Number of runtime stub nodes to show in profile") \ 3227 \ 3228 product(intx, ProfileIntervalsTicks, 100, \ 3229 "Number of ticks between printing of interval profile " \ 3230 "(+ProfileIntervals)") \ 3231 range(0, max_intx) \ 3232 \ 3233 notproduct(intx, 
ScavengeALotInterval, 1, \ 3234 "Interval between which scavenge will occur with +ScavengeALot") \ 3235 \ 3236 notproduct(intx, FullGCALotInterval, 1, \ 3237 "Interval between which full gc will occur with +FullGCALot") \ 3238 \ 3239 notproduct(intx, FullGCALotStart, 0, \ 3240 "For which invocation to start FullGCAlot") \ 3241 \ 3242 notproduct(intx, FullGCALotDummies, 32*K, \ 3243 "Dummy object allocated with +FullGCALot, forcing all objects " \ 3244 "to move") \ 3245 \ 3246 develop(intx, DontYieldALotInterval, 10, \ 3247 "Interval between which yields will be dropped (milliseconds)") \ 3248 \ 3249 develop(intx, MinSleepInterval, 1, \ 3250 "Minimum sleep() interval (milliseconds) when " \ 3251 "ConvertSleepToYield is off (used for Solaris)") \ 3252 \ 3253 develop(intx, ProfilerPCTickThreshold, 15, \ 3254 "Number of ticks in a PC buckets to be a hotspot") \ 3255 \ 3256 notproduct(intx, DeoptimizeALotInterval, 5, \ 3257 "Number of exits until DeoptimizeALot kicks in") \ 3258 \ 3259 notproduct(intx, ZombieALotInterval, 5, \ 3260 "Number of exits until ZombieALot kicks in") \ 3261 \ 3262 diagnostic(intx, MallocVerifyInterval, 0, \ 3263 "If non-zero, verify C heap after every N calls to " \ 3264 "malloc/realloc/free") \ 3265 range(0, max_intx) \ 3266 \ 3267 diagnostic(intx, MallocVerifyStart, 0, \ 3268 "If non-zero, start verifying C heap after Nth call to " \ 3269 "malloc/realloc/free") \ 3270 range(0, max_intx) \ 3271 \ 3272 diagnostic(uintx, MallocMaxTestWords, 0, \ 3273 "If non-zero, maximum number of words that malloc/realloc can " \ 3274 "allocate (for testing only)") \ 3275 range(0, max_uintx) \ 3276 \ 3277 product(intx, TypeProfileWidth, 2, \ 3278 "Number of receiver types to record in call/cast profile") \ 3279 range(0, 8) \ 3280 \ 3281 experimental(intx, MethodProfileWidth, 0, \ 3282 "Number of methods to record in call profile") \ 3283 \ 3284 develop(intx, BciProfileWidth, 2, \ 3285 "Number of return bci's to record in ret profile") \ 3286 \ 3287 
product(intx, PerMethodRecompilationCutoff, 400, \ 3288 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 3289 range(-1, max_intx) \ 3290 \ 3291 product(intx, PerBytecodeRecompilationCutoff, 200, \ 3292 "Per-BCI limit on repeated recompilation (-1=>'Inf')") \ 3293 range(-1, max_intx) \ 3294 \ 3295 product(intx, PerMethodTrapLimit, 100, \ 3296 "Limit on traps (of one kind) in a method (includes inlines)") \ 3297 range(0, max_jint) \ 3298 \ 3299 experimental(intx, PerMethodSpecTrapLimit, 5000, \ 3300 "Limit on speculative traps (of one kind) in a method " \ 3301 "(includes inlines)") \ 3302 range(0, max_jint) \ 3303 \ 3304 product(intx, PerBytecodeTrapLimit, 4, \ 3305 "Limit on traps (of one kind) at a particular BCI") \ 3306 range(0, max_jint) \ 3307 \ 3308 experimental(intx, SpecTrapLimitExtraEntries, 3, \ 3309 "Extra method data trap entries for speculation") \ 3310 \ 3311 develop(intx, InlineFrequencyRatio, 20, \ 3312 "Ratio of call site execution to caller method invocation") \ 3313 range(0, max_jint) \ 3314 \ 3315 develop_pd(intx, InlineFrequencyCount, \ 3316 "Count of call site execution necessary to trigger frequent " \ 3317 "inlining") \ 3318 range(0, max_jint) \ 3319 \ 3320 develop(intx, InlineThrowCount, 50, \ 3321 "Force inlining of interpreted methods that throw this often") \ 3322 range(0, max_jint) \ 3323 \ 3324 develop(intx, InlineThrowMaxSize, 200, \ 3325 "Force inlining of throwing methods smaller than this") \ 3326 range(0, max_jint) \ 3327 \ 3328 develop(intx, ProfilerNodeSize, 1024, \ 3329 "Size in K to allocate for the Profile Nodes of each thread") \ 3330 range(0, 1024) \ 3331 \ 3332 /* gc parameters */ \ 3333 product(size_t, InitialHeapSize, 0, \ 3334 "Initial heap size (in bytes); zero means use ergonomics") \ 3335 constraint(InitialHeapSizeConstraintFunc,AfterErgo) \ 3336 \ 3337 product(size_t, MaxHeapSize, ScaleForWordSize(96*M), \ 3338 "Maximum heap size (in bytes)") \ 3339 constraint(MaxHeapSizeConstraintFunc,AfterErgo) 
\ 3340 \ 3341 product(size_t, OldSize, ScaleForWordSize(4*M), \ 3342 "Initial tenured generation size (in bytes)") \ 3343 \ 3344 product(size_t, NewSize, ScaleForWordSize(1*M), \ 3345 "Initial new generation size (in bytes)") \ 3346 constraint(NewSizeConstraintFunc,AfterErgo) \ 3347 \ 3348 product(size_t, MaxNewSize, max_uintx, \ 3349 "Maximum new generation size (in bytes), max_uintx means set " \ 3350 "ergonomically") \ 3351 \ 3352 product(size_t, PretenureSizeThreshold, 0, \ 3353 "Maximum size in bytes of objects allocated in DefNew " \ 3354 "generation; zero means no maximum") \ 3355 \ 3356 product(size_t, MinTLABSize, 2*K, \ 3357 "Minimum allowed TLAB size (in bytes)") \ 3358 range(1, max_uintx) \ 3359 constraint(MinTLABSizeConstraintFunc,AfterMemoryInit) \ 3360 \ 3361 product(size_t, TLABSize, 0, \ 3362 "Starting TLAB size (in bytes); zero means set ergonomically") \ 3363 constraint(TLABSizeConstraintFunc,AfterMemoryInit) \ 3364 \ 3365 product(size_t, YoungPLABSize, 4096, \ 3366 "Size of young gen promotion LAB's (in HeapWords)") \ 3367 constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \ 3368 \ 3369 product(size_t, OldPLABSize, 1024, \ 3370 "Size of old gen promotion LAB's (in HeapWords), or Number " \ 3371 "of blocks to attempt to claim when refilling CMS LAB's") \ 3372 constraint(OldPLABSizeConstraintFunc,AfterMemoryInit) \ 3373 \ 3374 product(uintx, TLABAllocationWeight, 35, \ 3375 "Allocation averaging weight") \ 3376 range(0, 100) \ 3377 \ 3378 /* Limit the lower bound of this flag to 1 as it is used */ \ 3379 /* in a division expression. 
*/ \ 3380 product(uintx, TLABWasteTargetPercent, 1, \ 3381 "Percentage of Eden that can be wasted") \ 3382 range(1, 100) \ 3383 \ 3384 product(uintx, TLABRefillWasteFraction, 64, \ 3385 "Maximum TLAB waste at a refill (internal fragmentation)") \ 3386 range(1, max_uintx) \ 3387 \ 3388 product(uintx, TLABWasteIncrement, 4, \ 3389 "Increment allowed waste at slow allocation") \ 3390 \ 3391 product(uintx, SurvivorRatio, 8, \ 3392 "Ratio of eden/survivor space size") \ 3393 range(1, max_uintx-2) \ 3394 constraint(SurvivorRatioConstraintFunc,AfterMemoryInit) \ 3395 \ 3396 product(uintx, NewRatio, 2, \ 3397 "Ratio of old/new generation sizes") \ 3398 range(0, max_uintx-1) \ 3399 \ 3400 product_pd(size_t, NewSizeThreadIncrease, \ 3401 "Additional size added to desired new generation size per " \ 3402 "non-daemon thread (in bytes)") \ 3403 \ 3404 product_pd(size_t, MetaspaceSize, \ 3405 "Initial size of Metaspaces (in bytes)") \ 3406 constraint(MetaspaceSizeConstraintFunc,AfterErgo) \ 3407 \ 3408 product(size_t, MaxMetaspaceSize, max_uintx, \ 3409 "Maximum size of Metaspaces (in bytes)") \ 3410 constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo) \ 3411 \ 3412 product(size_t, CompressedClassSpaceSize, 1*G, \ 3413 "Maximum size of class area in Metaspace when compressed " \ 3414 "class pointers are used") \ 3415 range(1*M, 3*G) \ 3416 \ 3417 manageable(uintx, MinHeapFreeRatio, 40, \ 3418 "The minimum percentage of heap free after GC to avoid expansion."\ 3419 " For most GCs this applies to the old generation. In G1 and" \ 3420 " ParallelGC it applies to the whole heap.") \ 3421 range(0, 100) \ 3422 constraint(MinHeapFreeRatioConstraintFunc,AfterErgo) \ 3423 \ 3424 manageable(uintx, MaxHeapFreeRatio, 70, \ 3425 "The maximum percentage of heap free after GC to avoid shrinking."\ 3426 " For most GCs this applies to the old generation. 
In G1 and" \ 3427 " ParallelGC it applies to the whole heap.") \ 3428 range(0, 100) \ 3429 constraint(MaxHeapFreeRatioConstraintFunc,AfterErgo) \ 3430 \ 3431 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 3432 "Number of milliseconds per MB of free space in the heap") \ 3433 range(0, max_intx) \ 3434 constraint(SoftRefLRUPolicyMSPerMBConstraintFunc,AfterMemoryInit) \ 3435 \ 3436 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 3437 "The minimum change in heap space due to GC (in bytes)") \ 3438 \ 3439 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ 3440 "The minimum expansion of Metaspace (in bytes)") \ 3441 \ 3442 product(uintx, MaxMetaspaceFreeRatio, 70, \ 3443 "The maximum percentage of Metaspace free after GC to avoid " \ 3444 "shrinking") \ 3445 range(0, 100) \ 3446 constraint(MaxMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3447 \ 3448 product(uintx, MinMetaspaceFreeRatio, 40, \ 3449 "The minimum percentage of Metaspace free after GC to avoid " \ 3450 "expansion") \ 3451 range(0, 99) \ 3452 constraint(MinMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3453 \ 3454 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ 3455 "The maximum expansion of Metaspace without full GC (in bytes)") \ 3456 \ 3457 product(uintx, QueuedAllocationWarningCount, 0, \ 3458 "Number of times an allocation that queues behind a GC " \ 3459 "will retry before printing a warning") \ 3460 \ 3461 diagnostic(uintx, VerifyGCStartAt, 0, \ 3462 "GC invoke count where +VerifyBefore/AfterGC kicks in") \ 3463 \ 3464 diagnostic(intx, VerifyGCLevel, 0, \ 3465 "Generation level at which to start +VerifyBefore/AfterGC") \ 3466 range(0, 1) \ 3467 \ 3468 product(uintx, MaxTenuringThreshold, 15, \ 3469 "Maximum value for tenuring threshold") \ 3470 range(0, markOopDesc::max_age + 1) \ 3471 constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \ 3472 \ 3473 product(uintx, InitialTenuringThreshold, 7, \ 3474 "Initial value for tenuring threshold") \ 3475 
range(0, markOopDesc::max_age + 1) \ 3476 constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \ 3477 \ 3478 product(uintx, TargetSurvivorRatio, 50, \ 3479 "Desired percentage of survivor space used after scavenge") \ 3480 range(0, 100) \ 3481 \ 3482 product(uintx, MarkSweepDeadRatio, 5, \ 3483 "Percentage (0-100) of the old gen allowed as dead wood. " \ 3484 "Serial mark sweep treats this as both the minimum and maximum " \ 3485 "value. " \ 3486 "CMS uses this value only if it falls back to mark sweep. " \ 3487 "Par compact uses a variable scale based on the density of the " \ 3488 "generation and treats this as the maximum value when the heap " \ 3489 "is either completely full or completely empty. Par compact " \ 3490 "also has a smaller default value; see arguments.cpp.") \ 3491 range(0, 100) \ 3492 \ 3493 product(uintx, MarkSweepAlwaysCompactCount, 4, \ 3494 "How often should we fully compact the heap (ignoring the dead " \ 3495 "space parameters)") \ 3496 range(1, max_uintx) \ 3497 \ 3498 product(intx, PrintCMSStatistics, 0, \ 3499 "Statistics for CMS") \ 3500 \ 3501 product(bool, PrintCMSInitiationStatistics, false, \ 3502 "Statistics for initiating a CMS collection") \ 3503 \ 3504 product(intx, PrintFLSStatistics, 0, \ 3505 "Statistics for CMS' FreeListSpace") \ 3506 \ 3507 product(intx, PrintFLSCensus, 0, \ 3508 "Census for CMS' FreeListSpace") \ 3509 \ 3510 develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 3511 "Delay between expansion and allocation (in milliseconds)") \ 3512 \ 3513 develop(uintx, GCWorkerDelayMillis, 0, \ 3514 "Delay in scheduling GC workers (in milliseconds)") \ 3515 \ 3516 product(intx, DeferThrSuspendLoopCount, 4000, \ 3517 "(Unstable) Number of times to iterate in safepoint loop " \ 3518 "before blocking VM threads ") \ 3519 range(-1, max_jint-1) \ 3520 \ 3521 product(intx, DeferPollingPageLoopCount, -1, \ 3522 "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 3523 "before changing safepoint polling page to RO 
") \ 3524 range(-1, max_jint-1) \ 3525 \ 3526 product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 3527 range(0, max_intx) \ 3528 \ 3529 product(bool, PSChunkLargeArrays, true, \ 3530 "Process large arrays in chunks") \ 3531 \ 3532 product(uintx, GCDrainStackTargetSize, 64, \ 3533 "Number of entries we will try to leave on the stack " \ 3534 "during parallel gc") \ 3535 \ 3536 /* stack parameters */ \ 3537 product_pd(intx, StackYellowPages, \ 3538 "Number of yellow zone (recoverable overflows) pages") \ 3539 range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5)) \ 3540 \ 3541 product_pd(intx, StackRedPages, \ 3542 "Number of red zone (unrecoverable overflows) pages") \ 3543 range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \ 3544 \ 3545 /* greater stack shadow pages can't generate instruction to bang stack */ \ 3546 product_pd(intx, StackShadowPages, \ 3547 "Number of shadow zone (for overflow checking) pages " \ 3548 "this should exceed the depth of the VM and native call stack") \ 3549 range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30)) \ 3550 \ 3551 product_pd(intx, ThreadStackSize, \ 3552 "Thread Stack Size (in Kbytes)") \ 3553 range(0, max_intx-os::vm_page_size()) \ 3554 \ 3555 product_pd(intx, VMThreadStackSize, \ 3556 "Non-Java Thread Stack Size (in Kbytes)") \ 3557 range(0, max_intx/(1 * K)) \ 3558 \ 3559 product_pd(intx, CompilerThreadStackSize, \ 3560 "Compiler Thread Stack Size (in Kbytes)") \ 3561 range(0, max_intx) \ 3562 \ 3563 develop_pd(size_t, JVMInvokeMethodSlack, \ 3564 "Stack space (bytes) required for JVM_InvokeMethod to complete") \ 3565 \ 3566 /* code cache parameters */ \ 3567 /* ppc64/tiered compilation has large code-entry alignment. 
*/ \ 3568 develop(uintx, CodeCacheSegmentSize, \ 3569 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)), \ 3570 "Code cache segment size (in bytes) - smallest unit of " \ 3571 "allocation") \ 3572 range(1, 1024) \ 3573 constraint(CodeCacheSegmentSizeConstraintFunc, AfterErgo) \ 3574 \ 3575 develop_pd(intx, CodeEntryAlignment, \ 3576 "Code entry alignment for generated code (in bytes)") \ 3577 constraint(CodeEntryAlignmentConstraintFunc, AfterErgo) \ 3578 \ 3579 product_pd(intx, OptoLoopAlignment, \ 3580 "Align inner loops to zero relative to this modulus") \ 3581 range(1, 16) \ 3582 constraint(OptoLoopAlignmentConstraintFunc, AfterErgo) \ 3583 \ 3584 product_pd(uintx, InitialCodeCacheSize, \ 3585 "Initial code cache size (in bytes)") \ 3586 range(0, max_uintx) \ 3587 \ 3588 develop_pd(uintx, CodeCacheMinimumUseSpace, \ 3589 "Minimum code cache size (in bytes) required to start VM.") \ 3590 range(0, max_uintx) \ 3591 \ 3592 product(bool, SegmentedCodeCache, false, \ 3593 "Use a segmented code cache") \ 3594 \ 3595 product_pd(uintx, ReservedCodeCacheSize, \ 3596 "Reserved code cache size (in bytes) - maximum code cache size") \ 3597 range(0, max_uintx) \ 3598 \ 3599 product_pd(uintx, NonProfiledCodeHeapSize, \ 3600 "Size of code heap with non-profiled methods (in bytes)") \ 3601 range(0, max_uintx) \ 3602 \ 3603 product_pd(uintx, ProfiledCodeHeapSize, \ 3604 "Size of code heap with profiled methods (in bytes)") \ 3605 range(0, max_uintx) \ 3606 \ 3607 product_pd(uintx, NonNMethodCodeHeapSize, \ 3608 "Size of code heap with non-nmethods (in bytes)") \ 3609 range(0, max_uintx) \ 3610 \ 3611 product_pd(uintx, CodeCacheExpansionSize, \ 3612 "Code cache expansion size (in bytes)") \ 3613 range(0, max_uintx) \ 3614 \ 3615 develop_pd(uintx, CodeCacheMinBlockLength, \ 3616 "Minimum number of segments in a code cache block") \ 3617 range(1, 100) \ 3618 \ 3619 notproduct(bool, ExitOnFullCodeCache, false, \ 3620 "Exit the VM if we fill the code cache") \ 3621 \ 3622 product(bool, 
UseCodeCacheFlushing, true, \ 3623 "Remove cold/old nmethods from the code cache") \ 3624 \ 3625 product(uintx, StartAggressiveSweepingAt, 10, \ 3626 "Start aggressive sweeping if X[%] of the code cache is free." \ 3627 "Segmented code cache: X[%] of the non-profiled heap." \ 3628 "Non-segmented code cache: X[%] of the total code cache") \ 3629 range(0, 100) \ 3630 \ 3631 /* interpreter debugging */ \ 3632 develop(intx, BinarySwitchThreshold, 5, \ 3633 "Minimal number of lookupswitch entries for rewriting to binary " \ 3634 "switch") \ 3635 \ 3636 develop(intx, StopInterpreterAt, 0, \ 3637 "Stop interpreter execution at specified bytecode number") \ 3638 \ 3639 develop(intx, TraceBytecodesAt, 0, \ 3640 "Trace bytecodes starting with specified bytecode number") \ 3641 \ 3642 /* compiler interface */ \ 3643 develop(intx, CIStart, 0, \ 3644 "The id of the first compilation to permit") \ 3645 \ 3646 develop(intx, CIStop, max_jint, \ 3647 "The id of the last compilation to permit") \ 3648 \ 3649 develop(intx, CIStartOSR, 0, \ 3650 "The id of the first osr compilation to permit " \ 3651 "(CICountOSR must be on)") \ 3652 \ 3653 develop(intx, CIStopOSR, max_jint, \ 3654 "The id of the last osr compilation to permit " \ 3655 "(CICountOSR must be on)") \ 3656 \ 3657 develop(intx, CIBreakAtOSR, -1, \ 3658 "The id of osr compilation to break at") \ 3659 \ 3660 develop(intx, CIBreakAt, -1, \ 3661 "The id of compilation to break at") \ 3662 \ 3663 product(ccstrlist, CompileOnly, "", \ 3664 "List of methods (pkg/class.name) to restrict compilation to") \ 3665 \ 3666 product(ccstr, CompileCommandFile, NULL, \ 3667 "Read compiler commands from this file [.hotspot_compiler]") \ 3668 \ 3669 diagnostic(ccstr, CompilerDirectivesFile, NULL, \ 3670 "Read compiler directives from this file") \ 3671 \ 3672 product(ccstrlist, CompileCommand, "", \ 3673 "Prepend to .hotspot_compiler; e.g. 
log,java/lang/String.<init>") \ 3674 \ 3675 develop(bool, ReplayCompiles, false, \ 3676 "Enable replay of compilations from ReplayDataFile") \ 3677 \ 3678 product(ccstr, ReplayDataFile, NULL, \ 3679 "File containing compilation replay information" \ 3680 "[default: ./replay_pid%p.log] (%p replaced with pid)") \ 3681 \ 3682 product(ccstr, InlineDataFile, NULL, \ 3683 "File containing inlining replay information" \ 3684 "[default: ./inline_pid%p.log] (%p replaced with pid)") \ 3685 \ 3686 develop(intx, ReplaySuppressInitializers, 2, \ 3687 "Control handling of class initialization during replay: " \ 3688 "0 - don't do anything special; " \ 3689 "1 - treat all class initializers as empty; " \ 3690 "2 - treat class initializers for application classes as empty; " \ 3691 "3 - allow all class initializers to run during bootstrap but " \ 3692 " pretend they are empty after starting replay") \ 3693 range(0, 3) \ 3694 \ 3695 develop(bool, ReplayIgnoreInitErrors, false, \ 3696 "Ignore exceptions thrown during initialization for replay") \ 3697 \ 3698 product(bool, DumpReplayDataOnError, true, \ 3699 "Record replay data for crashing compiler threads") \ 3700 \ 3701 product(bool, CICompilerCountPerCPU, false, \ 3702 "1 compiler thread for log(N CPUs)") \ 3703 \ 3704 develop(intx, CIFireOOMAt, -1, \ 3705 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ 3706 "(non-negative value throws OOM after this many CI accesses " \ 3707 "in each compile)") \ 3708 notproduct(intx, CICrashAt, -1, \ 3709 "id of compilation to trigger assert in compiler thread for " \ 3710 "the purpose of testing, e.g. generation of replay data") \ 3711 notproduct(bool, CIObjectFactoryVerify, false, \ 3712 "enable potentially expensive verification in ciObjectFactory") \ 3713 \ 3714 /* Priorities */ \ 3715 product_pd(bool, UseThreadPriorities, "Use native thread priorities") \ 3716 \ 3717 product(intx, ThreadPriorityPolicy, 0, \ 3718 "0 : Normal. 
"\ 3719 " VM chooses priorities that are appropriate for normal "\ 3720 " applications. On Solaris NORM_PRIORITY and above are mapped "\ 3721 " to normal native priority. Java priorities below " \ 3722 " NORM_PRIORITY map to lower native priority values. On "\ 3723 " Windows applications are allowed to use higher native "\ 3724 " priorities. However, with ThreadPriorityPolicy=0, VM will "\ 3725 " not use the highest possible native priority, "\ 3726 " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ 3727 " system threads. On Linux thread priorities are ignored "\ 3728 " because the OS does not support static priority in "\ 3729 " SCHED_OTHER scheduling class which is the only choice for "\ 3730 " non-root, non-realtime applications. "\ 3731 "1 : Aggressive. "\ 3732 " Java thread priorities map over to the entire range of "\ 3733 " native thread priorities. Higher Java thread priorities map "\ 3734 " to higher native thread priorities. This policy should be "\ 3735 " used with care, as sometimes it can cause performance "\ 3736 " degradation in the application and/or the entire system. 
On "\ 3737 " Linux this policy requires root privilege.") \ 3738 range(0, 1) \ 3739 \ 3740 product(bool, ThreadPriorityVerbose, false, \ 3741 "Print priority changes") \ 3742 \ 3743 product(intx, CompilerThreadPriority, -1, \ 3744 "The native priority at which compiler threads should run " \ 3745 "(-1 means no change)") \ 3746 range(min_jint, max_jint) \ 3747 constraint(CompilerThreadPriorityConstraintFunc, AfterErgo) \ 3748 \ 3749 product(intx, VMThreadPriority, -1, \ 3750 "The native priority at which the VM thread should run " \ 3751 "(-1 means no change)") \ 3752 range(-1, 127) \ 3753 \ 3754 product(bool, CompilerThreadHintNoPreempt, true, \ 3755 "(Solaris only) Give compiler threads an extra quanta") \ 3756 \ 3757 product(bool, VMThreadHintNoPreempt, false, \ 3758 "(Solaris only) Give VM thread an extra quanta") \ 3759 \ 3760 product(intx, JavaPriority1_To_OSPriority, -1, \ 3761 "Map Java priorities to OS priorities") \ 3762 range(-1, 127) \ 3763 \ 3764 product(intx, JavaPriority2_To_OSPriority, -1, \ 3765 "Map Java priorities to OS priorities") \ 3766 range(-1, 127) \ 3767 \ 3768 product(intx, JavaPriority3_To_OSPriority, -1, \ 3769 "Map Java priorities to OS priorities") \ 3770 range(-1, 127) \ 3771 \ 3772 product(intx, JavaPriority4_To_OSPriority, -1, \ 3773 "Map Java priorities to OS priorities") \ 3774 range(-1, 127) \ 3775 \ 3776 product(intx, JavaPriority5_To_OSPriority, -1, \ 3777 "Map Java priorities to OS priorities") \ 3778 range(-1, 127) \ 3779 \ 3780 product(intx, JavaPriority6_To_OSPriority, -1, \ 3781 "Map Java priorities to OS priorities") \ 3782 range(-1, 127) \ 3783 \ 3784 product(intx, JavaPriority7_To_OSPriority, -1, \ 3785 "Map Java priorities to OS priorities") \ 3786 range(-1, 127) \ 3787 \ 3788 product(intx, JavaPriority8_To_OSPriority, -1, \ 3789 "Map Java priorities to OS priorities") \ 3790 range(-1, 127) \ 3791 \ 3792 product(intx, JavaPriority9_To_OSPriority, -1, \ 3793 "Map Java priorities to OS priorities") \ 3794 range(-1, 127) 
\ 3795 \ 3796 product(intx, JavaPriority10_To_OSPriority,-1, \ 3797 "Map Java priorities to OS priorities") \ 3798 range(-1, 127) \ 3799 \ 3800 experimental(bool, UseCriticalJavaThreadPriority, false, \ 3801 "Java thread priority 10 maps to critical scheduling priority") \ 3802 \ 3803 experimental(bool, UseCriticalCompilerThreadPriority, false, \ 3804 "Compiler thread(s) run at critical scheduling priority") \ 3805 \ 3806 experimental(bool, UseCriticalCMSThreadPriority, false, \ 3807 "ConcurrentMarkSweep thread runs at critical scheduling priority")\ 3808 \ 3809 /* compiler debugging */ \ 3810 notproduct(intx, CompileTheWorldStartAt, 1, \ 3811 "First class to consider when using +CompileTheWorld") \ 3812 \ 3813 notproduct(intx, CompileTheWorldStopAt, max_jint, \ 3814 "Last class to consider when using +CompileTheWorld") \ 3815 \ 3816 develop(intx, NewCodeParameter, 0, \ 3817 "Testing Only: Create a dedicated integer parameter before " \ 3818 "putback") \ 3819 \ 3820 /* new oopmap storage allocation */ \ 3821 develop(intx, MinOopMapAllocation, 8, \ 3822 "Minimum number of OopMap entries in an OopMapSet") \ 3823 \ 3824 /* Background Compilation */ \ 3825 develop(intx, LongCompileThreshold, 50, \ 3826 "Used with +TraceLongCompiles") \ 3827 \ 3828 /* recompilation */ \ 3829 product_pd(intx, CompileThreshold, \ 3830 "number of interpreted method invocations before (re-)compiling") \ 3831 constraint(CompileThresholdConstraintFunc, AfterErgo) \ 3832 \ 3833 product(double, CompileThresholdScaling, 1.0, \ 3834 "Factor to control when first compilation happens " \ 3835 "(both with and without tiered compilation): " \ 3836 "values greater than 1.0 delay counter overflow, " \ 3837 "values between 0 and 1.0 rush counter overflow, " \ 3838 "value of 1.0 leaves compilation thresholds unchanged " \ 3839 "value of 0.0 is equivalent to -Xint. " \ 3840 "" \ 3841 "Flag can be set as per-method option. 
" \ 3842 "If a value is specified for a method, compilation thresholds " \ 3843 "for that method are scaled by both the value of the global flag "\ 3844 "and the value of the per-method flag.") \ 3845 range(0.0, DBL_MAX) \ 3846 \ 3847 product(intx, Tier0InvokeNotifyFreqLog, 7, \ 3848 "Interpreter (tier 0) invocation notification frequency") \ 3849 range(0, 30) \ 3850 \ 3851 product(intx, Tier2InvokeNotifyFreqLog, 11, \ 3852 "C1 without MDO (tier 2) invocation notification frequency") \ 3853 range(0, 30) \ 3854 \ 3855 product(intx, Tier3InvokeNotifyFreqLog, 10, \ 3856 "C1 with MDO profiling (tier 3) invocation notification " \ 3857 "frequency") \ 3858 range(0, 30) \ 3859 \ 3860 product(intx, Tier23InlineeNotifyFreqLog, 20, \ 3861 "Inlinee invocation (tiers 2 and 3) notification frequency") \ 3862 range(0, 30) \ 3863 \ 3864 product(intx, Tier0BackedgeNotifyFreqLog, 10, \ 3865 "Interpreter (tier 0) invocation notification frequency") \ 3866 range(0, 30) \ 3867 \ 3868 product(intx, Tier2BackedgeNotifyFreqLog, 14, \ 3869 "C1 without MDO (tier 2) invocation notification frequency") \ 3870 range(0, 30) \ 3871 \ 3872 product(intx, Tier3BackedgeNotifyFreqLog, 13, \ 3873 "C1 with MDO profiling (tier 3) invocation notification " \ 3874 "frequency") \ 3875 range(0, 30) \ 3876 \ 3877 product(intx, Tier2CompileThreshold, 0, \ 3878 "threshold at which tier 2 compilation is invoked") \ 3879 range(0, max_jint) \ 3880 \ 3881 product(intx, Tier2BackEdgeThreshold, 0, \ 3882 "Back edge threshold at which tier 2 compilation is invoked") \ 3883 range(0, max_jint) \ 3884 \ 3885 product(intx, Tier3InvocationThreshold, 200, \ 3886 "Compile if number of method invocations crosses this " \ 3887 "threshold") \ 3888 range(0, max_jint) \ 3889 \ 3890 product(intx, Tier3MinInvocationThreshold, 100, \ 3891 "Minimum invocation to compile at tier 3") \ 3892 range(0, max_jint) \ 3893 \ 3894 product(intx, Tier3CompileThreshold, 2000, \ 3895 "Threshold at which tier 3 compilation is invoked (invocation 
" \ 3896 "minimum must be satisfied)") \ 3897 range(0, max_jint) \ 3898 \ 3899 product(intx, Tier3BackEdgeThreshold, 60000, \ 3900 "Back edge threshold at which tier 3 OSR compilation is invoked") \ 3901 range(0, max_jint) \ 3902 \ 3903 product(intx, Tier4InvocationThreshold, 5000, \ 3904 "Compile if number of method invocations crosses this " \ 3905 "threshold") \ 3906 range(0, max_jint) \ 3907 \ 3908 product(intx, Tier4MinInvocationThreshold, 600, \ 3909 "Minimum invocation to compile at tier 4") \ 3910 range(0, max_jint) \ 3911 \ 3912 product(intx, Tier4CompileThreshold, 15000, \ 3913 "Threshold at which tier 4 compilation is invoked (invocation " \ 3914 "minimum must be satisfied") \ 3915 range(0, max_jint) \ 3916 \ 3917 product(intx, Tier4BackEdgeThreshold, 40000, \ 3918 "Back edge threshold at which tier 4 OSR compilation is invoked") \ 3919 range(0, max_jint) \ 3920 \ 3921 product(intx, Tier3DelayOn, 5, \ 3922 "If C2 queue size grows over this amount per compiler thread " \ 3923 "stop compiling at tier 3 and start compiling at tier 2") \ 3924 range(0, max_jint) \ 3925 \ 3926 product(intx, Tier3DelayOff, 2, \ 3927 "If C2 queue size is less than this amount per compiler thread " \ 3928 "allow methods compiled at tier 2 transition to tier 3") \ 3929 range(0, max_jint) \ 3930 \ 3931 product(intx, Tier3LoadFeedback, 5, \ 3932 "Tier 3 thresholds will increase twofold when C1 queue size " \ 3933 "reaches this amount per compiler thread") \ 3934 range(0, max_jint) \ 3935 \ 3936 product(intx, Tier4LoadFeedback, 3, \ 3937 "Tier 4 thresholds will increase twofold when C2 queue size " \ 3938 "reaches this amount per compiler thread") \ 3939 range(0, max_jint) \ 3940 \ 3941 product(intx, TieredCompileTaskTimeout, 50, \ 3942 "Kill compile task if method was not used within " \ 3943 "given timeout in milliseconds") \ 3944 range(0, max_intx) \ 3945 \ 3946 product(intx, TieredStopAtLevel, 4, \ 3947 "Stop at given compilation level") \ 3948 range(0, 4) \ 3949 \ 3950 
product(intx, Tier0ProfilingStartPercentage, 200, \ 3951 "Start profiling in interpreter if the counters exceed tier 3 " \ 3952 "thresholds by the specified percentage") \ 3953 range(0, max_jint) \ 3954 \ 3955 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ 3956 "Increase the compile threshold for C1 compilation if the code " \ 3957 "cache is filled by the specified percentage") \ 3958 range(0, 99) \ 3959 \ 3960 product(intx, TieredRateUpdateMinTime, 1, \ 3961 "Minimum rate sampling interval (in milliseconds)") \ 3962 range(0, max_intx) \ 3963 \ 3964 product(intx, TieredRateUpdateMaxTime, 25, \ 3965 "Maximum rate sampling interval (in milliseconds)") \ 3966 range(0, max_intx) \ 3967 \ 3968 product_pd(bool, TieredCompilation, \ 3969 "Enable tiered compilation") \ 3970 \ 3971 product(bool, PrintTieredEvents, false, \ 3972 "Print tiered events notifications") \ 3973 \ 3974 product_pd(intx, OnStackReplacePercentage, \ 3975 "NON_TIERED number of method invocations/branches (expressed as " \ 3976 "% of CompileThreshold) before (re-)compiling OSR code") \ 3977 constraint(OnStackReplacePercentageConstraintFunc, AfterErgo) \ 3978 \ 3979 product(intx, InterpreterProfilePercentage, 33, \ 3980 "NON_TIERED number of method invocations/branches (expressed as " \ 3981 "% of CompileThreshold) before profiling in the interpreter") \ 3982 range(0, 100) \ 3983 \ 3984 develop(intx, MaxRecompilationSearchLength, 10, \ 3985 "The maximum number of frames to inspect when searching for " \ 3986 "recompilee") \ 3987 \ 3988 develop(intx, MaxInterpretedSearchLength, 3, \ 3989 "The maximum number of interpreted frames to skip when searching "\ 3990 "for recompilee") \ 3991 \ 3992 develop(intx, DesiredMethodLimit, 8000, \ 3993 "The desired maximum method size (in bytecodes) after inlining") \ 3994 \ 3995 develop(intx, HugeMethodLimit, 8000, \ 3996 "Don't compile methods larger than this if " \ 3997 "+DontCompileHugeMethods") \ 3998 \ 3999 /* New JDK 1.4 reflection implementation */ \ 
4000 \ 4001 develop(intx, FastSuperclassLimit, 8, \ 4002 "Depth of hardwired instanceof accelerator array") \ 4003 \ 4004 /* Properties for Java libraries */ \ 4005 \ 4006 product(size_t, MaxDirectMemorySize, 0, \ 4007 "Maximum total size of NIO direct-buffer allocations") \ 4008 range(0, (size_t)SIZE_MAX) \ 4009 \ 4010 /* Flags used for temporary code during development */ \ 4011 \ 4012 diagnostic(bool, UseNewCode, false, \ 4013 "Testing Only: Use the new version while testing") \ 4014 \ 4015 diagnostic(bool, UseNewCode2, false, \ 4016 "Testing Only: Use the new version while testing") \ 4017 \ 4018 diagnostic(bool, UseNewCode3, false, \ 4019 "Testing Only: Use the new version while testing") \ 4020 \ 4021 /* flags for performance data collection */ \ 4022 \ 4023 product(bool, UsePerfData, falseInEmbedded, \ 4024 "Flag to disable jvmstat instrumentation for performance testing "\ 4025 "and problem isolation purposes") \ 4026 \ 4027 product(bool, PerfDataSaveToFile, false, \ 4028 "Save PerfData memory to hsperfdata_<pid> file on exit") \ 4029 \ 4030 product(ccstr, PerfDataSaveFile, NULL, \ 4031 "Save PerfData memory to the specified absolute pathname. " \ 4032 "The string %p in the file name (if present) " \ 4033 "will be replaced by pid") \ 4034 \ 4035 product(intx, PerfDataSamplingInterval, 50, \ 4036 "Data sampling interval (in milliseconds)") \ 4037 range(PeriodicTask::min_interval, max_jint) \ 4038 constraint(PerfDataSamplingIntervalFunc, AfterErgo) \ 4039 \ 4040 develop(bool, PerfTraceDataCreation, false, \ 4041 "Trace creation of Performance Data Entries") \ 4042 \ 4043 develop(bool, PerfTraceMemOps, false, \ 4044 "Trace PerfMemory create/attach/detach calls") \ 4045 \ 4046 product(bool, PerfDisableSharedMem, false, \ 4047 "Store performance data in standard memory") \ 4048 \ 4049 product(intx, PerfDataMemorySize, 64*K, \ 4050 "Size of performance data memory region. 
Will be rounded " \ 4051 "up to a multiple of the native os page size.") \ 4052 range(128, 32*64*K) \ 4053 \ 4054 product(intx, PerfMaxStringConstLength, 1024, \ 4055 "Maximum PerfStringConstant string length before truncation") \ 4056 range(32, 32*K) \ 4057 \ 4058 product(bool, PerfAllowAtExitRegistration, false, \ 4059 "Allow registration of atexit() methods") \ 4060 \ 4061 product(bool, PerfBypassFileSystemCheck, false, \ 4062 "Bypass Win32 file system criteria checks (Windows Only)") \ 4063 \ 4064 product(intx, UnguardOnExecutionViolation, 0, \ 4065 "Unguard page and retry on no-execute fault (Win32 only) " \ 4066 "0=off, 1=conservative, 2=aggressive") \ 4067 range(0, 2) \ 4068 \ 4069 /* Serviceability Support */ \ 4070 \ 4071 product(bool, ManagementServer, false, \ 4072 "Create JMX Management Server") \ 4073 \ 4074 product(bool, DisableAttachMechanism, false, \ 4075 "Disable mechanism that allows tools to attach to this VM") \ 4076 \ 4077 product(bool, StartAttachListener, false, \ 4078 "Always start Attach Listener at VM startup") \ 4079 \ 4080 manageable(bool, PrintConcurrentLocks, false, \ 4081 "Print java.util.concurrent locks in thread dump") \ 4082 \ 4083 product(bool, TransmitErrorReport, false, \ 4084 "Enable error report transmission on erroneous termination") \ 4085 \ 4086 product(ccstr, ErrorReportServer, NULL, \ 4087 "Override built-in error report server address") \ 4088 \ 4089 /* Shared spaces */ \ 4090 \ 4091 product(bool, UseSharedSpaces, true, \ 4092 "Use shared spaces for metadata") \ 4093 \ 4094 product(bool, VerifySharedSpaces, false, \ 4095 "Verify shared spaces (false for default archive, true for " \ 4096 "archive specified by -XX:SharedArchiveFile)") \ 4097 \ 4098 product(bool, RequireSharedSpaces, false, \ 4099 "Require shared spaces for metadata") \ 4100 \ 4101 product(bool, DumpSharedSpaces, false, \ 4102 "Special mode: JVM reads a class list, loads classes, builds " \ 4103 "shared spaces, and dumps the shared spaces to a file to be 
" \ 4104 "used in future JVM runs") \ 4105 \ 4106 product(bool, PrintSharedSpaces, false, \ 4107 "Print usage of shared spaces") \ 4108 \ 4109 product(bool, PrintSharedArchiveAndExit, false, \ 4110 "Print shared archive file contents") \ 4111 \ 4112 product(bool, PrintSharedDictionary, false, \ 4113 "If PrintSharedArchiveAndExit is true, also print the shared " \ 4114 "dictionary") \ 4115 \ 4116 product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE, \ 4117 "Size of read-write space for metadata (in bytes)") \ 4118 range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE) \ 4119 \ 4120 product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE, \ 4121 "Size of read-only space for metadata (in bytes)") \ 4122 range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE) \ 4123 \ 4124 product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE, \ 4125 "Size of the shared miscellaneous data area (in bytes)") \ 4126 range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE) \ 4127 \ 4128 product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE, \ 4129 "Size of the shared miscellaneous code area (in bytes)") \ 4130 range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE) \ 4131 \ 4132 product(size_t, SharedBaseAddress, LP64_ONLY(32*G) \ 4133 NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \ 4134 "Address to allocate shared memory region for class data") \ 4135 range(0, SIZE_MAX) \ 4136 \ 4137 product(uintx, SharedSymbolTableBucketSize, 4, \ 4138 "Average number of symbols per bucket in shared table") \ 4139 range(2, 246) \ 4140 \ 4141 diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false, \ 4142 "Do not quit -Xshare:dump even if we encounter unverifiable " \ 4143 "classes. 
Just exclude them from the shared dictionary.") \ 4144 \ 4145 diagnostic(bool, PrintMethodHandleStubs, false, \ 4146 "Print generated stub code for method handles") \ 4147 \ 4148 develop(bool, TraceMethodHandles, false, \ 4149 "trace internal method handle operations") \ 4150 \ 4151 diagnostic(bool, VerifyMethodHandles, trueInDebug, \ 4152 "perform extra checks when constructing method handles") \ 4153 \ 4154 diagnostic(bool, ShowHiddenFrames, false, \ 4155 "show method handle implementation frames (usually hidden)") \ 4156 \ 4157 experimental(bool, TrustFinalNonStaticFields, false, \ 4158 "trust final non-static declarations for constant folding") \ 4159 \ 4160 diagnostic(bool, FoldStableValues, true, \ 4161 "Optimize loads from stable fields (marked w/ @Stable)") \ 4162 \ 4163 develop(bool, TraceInvokeDynamic, false, \ 4164 "trace internal invoke dynamic operations") \ 4165 \ 4166 diagnostic(bool, PauseAtStartup, false, \ 4167 "Causes the VM to pause at startup time and wait for the pause " \ 4168 "file to be removed (default: ./vm.paused.<pid>)") \ 4169 \ 4170 diagnostic(ccstr, PauseAtStartupFile, NULL, \ 4171 "The file to create and for whose removal to await when pausing " \ 4172 "at startup. 
(default: ./vm.paused.<pid>)") \ 4173 \ 4174 diagnostic(bool, PauseAtExit, false, \ 4175 "Pause and wait for keypress on exit if a debugger is attached") \ 4176 \ 4177 product(bool, ExtendedDTraceProbes, false, \ 4178 "Enable performance-impacting dtrace probes") \ 4179 \ 4180 product(bool, DTraceMethodProbes, false, \ 4181 "Enable dtrace probes for method-entry and method-exit") \ 4182 \ 4183 product(bool, DTraceAllocProbes, false, \ 4184 "Enable dtrace probes for object allocation") \ 4185 \ 4186 product(bool, DTraceMonitorProbes, false, \ 4187 "Enable dtrace probes for monitor events") \ 4188 \ 4189 product(bool, RelaxAccessControlCheck, false, \ 4190 "Relax the access control checks in the verifier") \ 4191 \ 4192 product(uintx, StringTableSize, defaultStringTableSize, \ 4193 "Number of buckets in the interned String table") \ 4194 range(minimumStringTableSize, 111*defaultStringTableSize) \ 4195 \ 4196 experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ 4197 "Number of buckets in the JVM internal Symbol table") \ 4198 range(minimumSymbolTableSize, 111*defaultSymbolTableSize) \ 4199 \ 4200 product(bool, UseStringDeduplication, false, \ 4201 "Use string deduplication") \ 4202 \ 4203 product(bool, PrintStringDeduplicationStatistics, false, \ 4204 "Print string deduplication statistics") \ 4205 \ 4206 product(uintx, StringDeduplicationAgeThreshold, 3, \ 4207 "A string must reach this age (or be promoted to an old region) " \ 4208 "to be considered for deduplication") \ 4209 range(1, markOopDesc::max_age) \ 4210 \ 4211 diagnostic(bool, StringDeduplicationResizeALot, false, \ 4212 "Force table resize every time the table is scanned") \ 4213 \ 4214 diagnostic(bool, StringDeduplicationRehashALot, false, \ 4215 "Force table rehash every time the table is scanned") \ 4216 \ 4217 diagnostic(bool, WhiteBoxAPI, false, \ 4218 "Enable internal testing APIs") \ 4219 \ 4220 product(bool, PrintGCCause, true, \ 4221 "Include GC cause in GC logging") \ 4222 \ 4223 
experimental(intx, SurvivorAlignmentInBytes, 0, \ 4224 "Default survivor space alignment in bytes") \ 4225 constraint(SurvivorAlignmentInBytesConstraintFunc,AfterErgo) \ 4226 \ 4227 product(bool , AllowNonVirtualCalls, false, \ 4228 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ 4229 \ 4230 product(ccstr, DumpLoadedClassList, NULL, \ 4231 "Dump the names all loaded classes, that could be stored into " \ 4232 "the CDS archive, in the specified file") \ 4233 \ 4234 product(ccstr, SharedClassListFile, NULL, \ 4235 "Override the default CDS class list") \ 4236 \ 4237 diagnostic(ccstr, SharedArchiveFile, NULL, \ 4238 "Override the default location of the CDS archive file") \ 4239 \ 4240 product(ccstr, ExtraSharedClassListFile, NULL, \ 4241 "Extra classlist for building the CDS archive file") \ 4242 \ 4243 experimental(size_t, ArrayAllocatorMallocLimit, \ 4244 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \ 4245 "Allocation less than this value will be allocated " \ 4246 "using malloc. 
Larger allocations will use mmap.") \ 4247 \ 4248 experimental(bool, AlwaysAtomicAccesses, false, \ 4249 "Accesses to all variables should always be atomic") \ 4250 \ 4251 product(bool, EnableTracing, false, \ 4252 "Enable event-based tracing") \ 4253 \ 4254 product(bool, UseLockedTracing, false, \ 4255 "Use locked-tracing when doing event-based tracing") \ 4256 \ 4257 diagnostic(bool, UseUnalignedAccesses, false, \ 4258 "Use unaligned memory accesses in Unsafe") \ 4259 \ 4260 product_pd(bool, PreserveFramePointer, \ 4261 "Use the FP register for holding the frame pointer " \ 4262 "and not as a general purpose register.") \ 4263 \ 4264 diagnostic(bool, CheckIntrinsics, true, \ 4265 "When a class C is loaded, check that " \ 4266 "(1) all intrinsics defined by the VM for class C are present "\ 4267 "in the loaded class file and are marked with the " \ 4268 "@HotSpotIntrinsicCandidate annotation, that " \ 4269 "(2) there is an intrinsic registered for all loaded methods " \ 4270 "that are annotated with the @HotSpotIntrinsicCandidate " \ 4271 "annotation, and that " \ 4272 "(3) no orphan methods exist for class C (i.e., methods for " \ 4273 "which the VM declares an intrinsic but that are not declared "\ 4274 "in the loaded class C. 
                                                                     " \
             "Check (3) is available only in debug builds.")                \
                                                                            \
  diagnostic(bool, CompilerDirectivesIgnoreCompileCommands, false,          \
             "Disable backwards compatibility for compile commands.")       \
                                                                            \
  diagnostic(bool, PrintCompilerDirectives, false,                          \
             "Print compiler directives on installation.")

/*
 * Macros for factoring of globals
 */

// Interface macros
// These are passed to the RUNTIME_FLAGS/RUNTIME_OS_FLAGS/ARCH_FLAGS X-macro
// tables below to emit one extern "C" declaration per flag.  The "C" linkage
// keeps the symbol names unmangled so external tooling (e.g. the
// serviceability agent) can locate the flag variables by name.
#define DECLARE_PRODUCT_FLAG(type, name, value, doc)      extern "C" type name;
#define DECLARE_PD_PRODUCT_FLAG(type, name, doc)          extern "C" type name;
#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc)   extern "C" type name;
#define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name;
#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc)   extern "C" type name;
#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc)   extern "C" type name;
#ifdef PRODUCT
// In PRODUCT builds, developer/notproduct flags become compile-time constants
// (the `const type name` definition), while a separate CONST_##name symbol is
// still exported so tools can read the baked-in value.
#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)    extern "C" type CONST_##name; const type name = value;
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)        extern "C" type CONST_##name; const type name = pd_##name;
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)   extern "C" type CONST_##name;
#else
// In non-PRODUCT builds all flag kinds are ordinary mutable globals.
#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)    extern "C" type name;
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)        extern "C" type name;
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)   extern "C" type name;
#endif // PRODUCT
// Special LP64 flags, product only needed for now.
#ifdef _LP64
#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
#else
// On 32-bit builds the LP64-only flag degenerates to a constant.
#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
#endif // _LP64

// Implementation macros
// These emit the single definition of each flag variable (expanded once, from
// globals.cpp).  The *_PD_* variants take their initial value from the
// platform-dependent pd_##name constant defined in the per-arch/per-os
// globals_*.hpp headers included above.
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc)      type name = value;
#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc)          type name = pd_##name;
#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc)   type name = value;
#define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc)   type name = value;
#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc)   type name = value;
#ifdef PRODUCT
// PRODUCT builds: define only the CONST_##name mirror; the flag itself was
// declared as a compile-time constant by the DECLARE_* macros above.
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)    type CONST_##name = value;
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)        type CONST_##name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)   type CONST_##name = value;
#else
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)    type name = value;
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)        type name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)   type name = value;
#endif // PRODUCT
#ifdef _LP64
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
#else
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
#endif // _LP64

// Only materialize src code for range checking when required, ignore otherwise
#define IGNORE_RANGE(a, b)
// Only materialize src code for constraint checking when required, ignore otherwise
#define IGNORE_CONSTRAINT(func,type)

// Expand the flag tables with the DECLARE_* interface macros to produce the
// extern declarations for every runtime, OS and architecture flag.  range()
// and constraint() clauses are swallowed here by the IGNORE_* macros; they
// are materialized separately where flag registration needs them.
RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \
              DECLARE_PD_DEVELOPER_FLAG, \
              DECLARE_PRODUCT_FLAG, \
              DECLARE_PD_PRODUCT_FLAG, \
              DECLARE_DIAGNOSTIC_FLAG, \
              DECLARE_EXPERIMENTAL_FLAG, \
              DECLARE_NOTPRODUCT_FLAG, \
              DECLARE_MANAGEABLE_FLAG, \
              DECLARE_PRODUCT_RW_FLAG, \
              DECLARE_LP64_PRODUCT_FLAG, \
              IGNORE_RANGE, \
              IGNORE_CONSTRAINT)

// OS-specific flag table: note it has no experimental/manageable/product_rw
// or LP64 slots, so it takes a shorter macro argument list.
RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \
                 DECLARE_PD_DEVELOPER_FLAG, \
                 DECLARE_PRODUCT_FLAG, \
                 DECLARE_PD_PRODUCT_FLAG, \
                 DECLARE_DIAGNOSTIC_FLAG, \
                 DECLARE_NOTPRODUCT_FLAG, \
                 IGNORE_RANGE, \
                 IGNORE_CONSTRAINT)

// Architecture-specific flag table: no platform-dependent (pd) slots at all.
ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, \
           DECLARE_PRODUCT_FLAG, \
           DECLARE_DIAGNOSTIC_FLAG, \
           DECLARE_EXPERIMENTAL_FLAG, \
           DECLARE_NOTPRODUCT_FLAG, \
           IGNORE_RANGE, \
           IGNORE_CONSTRAINT)

// Extensions

#include "runtime/globals_ext.hpp"

#endif // SHARE_VM_RUNTIME_GLOBALS_HPP