globals.hpp revision 356:1ee8caae33af
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
22255736Sdavidch * 23255736Sdavidch */ 24255736Sdavidch 25255736Sdavidch#if !defined(COMPILER1) && !defined(COMPILER2) 26255736Sdavidchdefine_pd_global(bool, BackgroundCompilation, false); 27255736Sdavidchdefine_pd_global(bool, UseTLAB, false); 28255736Sdavidchdefine_pd_global(bool, CICompileOSR, false); 29255736Sdavidchdefine_pd_global(bool, UseTypeProfile, false); 30255736Sdavidchdefine_pd_global(bool, UseOnStackReplacement, false); 31255736Sdavidchdefine_pd_global(bool, InlineIntrinsics, false); 32255736Sdavidchdefine_pd_global(bool, PreferInterpreterNativeStubs, true); 33296580Sdavidcsdefine_pd_global(bool, ProfileInterpreter, false); 34296580Sdavidcsdefine_pd_global(bool, ProfileTraps, false); 35296580Sdavidcsdefine_pd_global(bool, TieredCompilation, false); 36255736Sdavidch 37255736Sdavidchdefine_pd_global(intx, CompileThreshold, 0); 38255736Sdavidchdefine_pd_global(intx, Tier2CompileThreshold, 0); 39255736Sdavidchdefine_pd_global(intx, Tier3CompileThreshold, 0); 40255736Sdavidchdefine_pd_global(intx, Tier4CompileThreshold, 0); 41255736Sdavidch 42255736Sdavidchdefine_pd_global(intx, BackEdgeThreshold, 0); 43255736Sdavidchdefine_pd_global(intx, Tier2BackEdgeThreshold, 0); 44255736Sdavidchdefine_pd_global(intx, Tier3BackEdgeThreshold, 0); 45255736Sdavidchdefine_pd_global(intx, Tier4BackEdgeThreshold, 0); 46255736Sdavidch 47255736Sdavidchdefine_pd_global(intx, OnStackReplacePercentage, 0); 48255736Sdavidchdefine_pd_global(bool, ResizeTLAB, false); 49255736Sdavidchdefine_pd_global(intx, FreqInlineSize, 0); 50255736Sdavidchdefine_pd_global(intx, NewSizeThreadIncrease, 4*K); 51255736Sdavidchdefine_pd_global(intx, NewRatio, 4); 52255736Sdavidchdefine_pd_global(intx, InlineClassNatives, true); 53255736Sdavidchdefine_pd_global(intx, InlineUnsafeOps, true); 54255736Sdavidchdefine_pd_global(intx, InitialCodeCacheSize, 160*K); 55255736Sdavidchdefine_pd_global(intx, ReservedCodeCacheSize, 32*M); 56255736Sdavidchdefine_pd_global(intx, CodeCacheExpansionSize, 32*K); 
57255736Sdavidchdefine_pd_global(intx, CodeCacheMinBlockLength, 1); 58255736Sdavidchdefine_pd_global(uintx,PermSize, ScaleForWordSize(4*M)); 59255736Sdavidchdefine_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M)); 60255736Sdavidchdefine_pd_global(bool, NeverActAsServerClassMachine, true); 61255736Sdavidchdefine_pd_global(uintx, DefaultMaxRAM, 1*G); 62255736Sdavidch#define CI_COMPILER_COUNT 0 63255736Sdavidch#else 64255736Sdavidch 65255736Sdavidch#ifdef COMPILER2 66255736Sdavidch#define CI_COMPILER_COUNT 2 67255736Sdavidch#else 68255736Sdavidch#define CI_COMPILER_COUNT 1 69255736Sdavidch#endif // COMPILER2 70255736Sdavidch 71255736Sdavidch#endif // no compilers 72255736Sdavidch 73255736Sdavidch 74255736Sdavidch// string type aliases used only in this file 75255736Sdavidchtypedef const char* ccstr; 76255736Sdavidchtypedef const char* ccstrlist; // represents string arguments which accumulate 77255736Sdavidch 78255736Sdavidchenum FlagValueOrigin { 79255736Sdavidch DEFAULT = 0, 80255736Sdavidch COMMAND_LINE = 1, 81255736Sdavidch ENVIRON_VAR = 2, 82255736Sdavidch CONFIG_FILE = 3, 83255736Sdavidch MANAGEMENT = 4, 84255736Sdavidch ERGONOMIC = 5, 85255736Sdavidch ATTACH_ON_DEMAND = 6, 86255736Sdavidch INTERNAL = 99 87255736Sdavidch}; 88255736Sdavidch 89255736Sdavidchstruct Flag { 90255736Sdavidch const char *type; 91255736Sdavidch const char *name; 92255736Sdavidch void* addr; 93255736Sdavidch const char *kind; 94255736Sdavidch FlagValueOrigin origin; 95255736Sdavidch 96255736Sdavidch // points to all Flags static array 97255736Sdavidch static Flag *flags; 98255736Sdavidch 99255736Sdavidch // number of flags 100255736Sdavidch static size_t numFlags; 101255736Sdavidch 102255736Sdavidch static Flag* find_flag(char* name, size_t length); 103255736Sdavidch 104255736Sdavidch bool is_bool() const { return strcmp(type, "bool") == 0; } 105255736Sdavidch bool get_bool() const { return *((bool*) addr); } 106255736Sdavidch void set_bool(bool value) { *((bool*) addr) = value; } 
107255736Sdavidch 108255736Sdavidch bool is_intx() const { return strcmp(type, "intx") == 0; } 109255736Sdavidch intx get_intx() const { return *((intx*) addr); } 110255736Sdavidch void set_intx(intx value) { *((intx*) addr) = value; } 111255736Sdavidch 112255736Sdavidch bool is_uintx() const { return strcmp(type, "uintx") == 0; } 113255736Sdavidch uintx get_uintx() const { return *((uintx*) addr); } 114255736Sdavidch void set_uintx(uintx value) { *((uintx*) addr) = value; } 115255736Sdavidch 116255736Sdavidch bool is_double() const { return strcmp(type, "double") == 0; } 117255736Sdavidch double get_double() const { return *((double*) addr); } 118255736Sdavidch void set_double(double value) { *((double*) addr) = value; } 119255736Sdavidch 120255736Sdavidch bool is_ccstr() const { return strcmp(type, "ccstr") == 0 || strcmp(type, "ccstrlist") == 0; } 121255736Sdavidch bool ccstr_accumulates() const { return strcmp(type, "ccstrlist") == 0; } 122255736Sdavidch ccstr get_ccstr() const { return *((ccstr*) addr); } 123255736Sdavidch void set_ccstr(ccstr value) { *((ccstr*) addr) = value; } 124255736Sdavidch 125255736Sdavidch bool is_unlocker() const; 126255736Sdavidch bool is_unlocked() const; 127255736Sdavidch bool is_writeable() const; 128255736Sdavidch bool is_external() const; 129255736Sdavidch 130255736Sdavidch void print_on(outputStream* st); 131255736Sdavidch void print_as_flag(outputStream* st); 132255736Sdavidch}; 133255736Sdavidch 134255736Sdavidch// debug flags control various aspects of the VM and are global accessible 135255736Sdavidch 136255736Sdavidch// use FlagSetting to temporarily change some debug flag 137255736Sdavidch// e.g. 
FlagSetting fs(DebugThisAndThat, true); 138255736Sdavidch// restored to previous value upon leaving scope 139255736Sdavidchclass FlagSetting { 140255736Sdavidch bool val; 141255736Sdavidch bool* flag; 142255736Sdavidch public: 143255736Sdavidch FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; } 144255736Sdavidch ~FlagSetting() { *flag = val; } 145255736Sdavidch}; 146255736Sdavidch 147255736Sdavidch 148255736Sdavidchclass CounterSetting { 149255736Sdavidch intx* counter; 150255736Sdavidch public: 151255736Sdavidch CounterSetting(intx* cnt) { counter = cnt; (*counter)++; } 152255736Sdavidch ~CounterSetting() { (*counter)--; } 153255736Sdavidch}; 154255736Sdavidch 155255736Sdavidch 156255736Sdavidchclass IntFlagSetting { 157255736Sdavidch intx val; 158255736Sdavidch intx* flag; 159255736Sdavidch public: 160255736Sdavidch IntFlagSetting(intx& fl, intx newValue) { flag = &fl; val = fl; fl = newValue; } 161255736Sdavidch ~IntFlagSetting() { *flag = val; } 162255736Sdavidch}; 163255736Sdavidch 164255736Sdavidch 165255736Sdavidchclass DoubleFlagSetting { 166255736Sdavidch double val; 167255736Sdavidch double* flag; 168255736Sdavidch public: 169255736Sdavidch DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; } 170255736Sdavidch ~DoubleFlagSetting() { *flag = val; } 171255736Sdavidch}; 172255736Sdavidch 173255736Sdavidch 174255736Sdavidchclass CommandLineFlags { 175255736Sdavidch public: 176255736Sdavidch static bool boolAt(char* name, size_t len, bool* value); 177255736Sdavidch static bool boolAt(char* name, bool* value) { return boolAt(name, strlen(name), value); } 178255736Sdavidch static bool boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin); 179255736Sdavidch static bool boolAtPut(char* name, bool* value, FlagValueOrigin origin) { return boolAtPut(name, strlen(name), value, origin); } 180255736Sdavidch 181255736Sdavidch static bool intxAt(char* name, size_t len, intx* value); 
182255736Sdavidch static bool intxAt(char* name, intx* value) { return intxAt(name, strlen(name), value); } 183255736Sdavidch static bool intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin); 184255736Sdavidch static bool intxAtPut(char* name, intx* value, FlagValueOrigin origin) { return intxAtPut(name, strlen(name), value, origin); } 185255736Sdavidch 186255736Sdavidch static bool uintxAt(char* name, size_t len, uintx* value); 187255736Sdavidch static bool uintxAt(char* name, uintx* value) { return uintxAt(name, strlen(name), value); } 188255736Sdavidch static bool uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin); 189255736Sdavidch static bool uintxAtPut(char* name, uintx* value, FlagValueOrigin origin) { return uintxAtPut(name, strlen(name), value, origin); } 190255736Sdavidch 191255736Sdavidch static bool doubleAt(char* name, size_t len, double* value); 192255736Sdavidch static bool doubleAt(char* name, double* value) { return doubleAt(name, strlen(name), value); } 193255736Sdavidch static bool doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin); 194255736Sdavidch static bool doubleAtPut(char* name, double* value, FlagValueOrigin origin) { return doubleAtPut(name, strlen(name), value, origin); } 195255736Sdavidch 196255736Sdavidch static bool ccstrAt(char* name, size_t len, ccstr* value); 197255736Sdavidch static bool ccstrAt(char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); } 198255736Sdavidch static bool ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin); 199255736Sdavidch static bool ccstrAtPut(char* name, ccstr* value, FlagValueOrigin origin) { return ccstrAtPut(name, strlen(name), value, origin); } 200255736Sdavidch 201255736Sdavidch // Returns false if name is not a command line flag. 
202255736Sdavidch static bool wasSetOnCmdline(const char* name, bool* value); 203255736Sdavidch static void printSetFlags(); 204255736Sdavidch 205255736Sdavidch static void printFlags() PRODUCT_RETURN; 206255736Sdavidch 207255736Sdavidch static void verify() PRODUCT_RETURN; 208255736Sdavidch}; 209255736Sdavidch 210255736Sdavidch// use this for flags that are true by default in the debug version but 211255736Sdavidch// false in the optimized version, and vice versa 212255736Sdavidch#ifdef ASSERT 213255736Sdavidch#define trueInDebug true 214255736Sdavidch#define falseInDebug false 215255736Sdavidch#else 216255736Sdavidch#define trueInDebug false 217255736Sdavidch#define falseInDebug true 218255736Sdavidch#endif 219255736Sdavidch 220255736Sdavidch// use this for flags that are true per default in the product build 221255736Sdavidch// but false in development builds, and vice versa 222255736Sdavidch#ifdef PRODUCT 223255736Sdavidch#define trueInProduct true 224255736Sdavidch#define falseInProduct false 225255736Sdavidch#else 226255736Sdavidch#define trueInProduct false 227255736Sdavidch#define falseInProduct true 228255736Sdavidch#endif 229255736Sdavidch 230255736Sdavidch// use this for flags that are true per default in the tiered build 231255736Sdavidch// but false in non-tiered builds, and vice versa 232255736Sdavidch#ifdef TIERED 233255736Sdavidch#define trueInTiered true 234255736Sdavidch#define falseInTiered false 235255736Sdavidch#else 236255736Sdavidch#define trueInTiered false 237255736Sdavidch#define falseInTiered true 238255736Sdavidch#endif 239255736Sdavidch 240255736Sdavidch// develop flags are settable / visible only during development and are constant in the PRODUCT version 241255736Sdavidch// product flags are always settable / visible 242255736Sdavidch// notproduct flags are settable / visible only during development and are not declared in the PRODUCT version 243255736Sdavidch 244255736Sdavidch// A flag must be declared with one of the following types: 
// bool, intx, uintx, ccstr.
// The type "ccstr" is an alias for "const char*" and is used
// only in this file, because the macrology requires single-token type names.

// Note: Diagnostic options are not meant for VM tuning or for product modes.
// They are to be used for VM quality assurance or field diagnosis
// of VM bugs.  They are hidden so that users will not be encouraged to
// try them as if they were ordinary VM execution options.  However, they
// are available in the product version of the VM.  Under instruction
// from support engineers, VM customers can turn them on to collect
// diagnostic information about VM problems.  To use a VM diagnostic
// option, you must first specify +UnlockDiagnosticVMOptions.
// (This master switch also affects the behavior of -Xprintflags.)
//
// experimental flags are in support of features that are not
// part of the officially supported product, but are available
// for experimenting with.  They could, for example, be performance
// features that may not have undergone full or rigorous QA, but which may
// help performance in some cases and are released for experimentation
// by the community of users and developers.  This flag also allows one to
// be able to build a fully supported product that nonetheless also
// ships with some unsupported, lightly tested, experimental features.
// Like the UnlockDiagnosticVMOptions flag above, there is a corresponding
// UnlockExperimentalVMOptions flag, which allows the control and
// modification of the experimental flags.
//
// manageable flags are writeable external product flags.
// They are dynamically writeable through the JDK management interface
// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole.
// These flags are an external exported interface (see CCC).  The list of
// manageable flags can be queried programmatically through the management
// interface.
//
// A flag can be made "manageable" only if
// - the flag is defined in a CCC as an external exported interface.
// - the VM implementation supports dynamic setting of the flag.
//   This implies that the VM must *always* query the flag variable
//   and not reuse state related to the flag state at any given time.
// - you want the flag to be queried programmatically by the customers.
//
// product_rw flags are writeable internal product flags.
// They are like "manageable" flags but for internal/private use.
// The list of product_rw flags are internal/private flags which
// may be changed/removed in a future release.  They can be set
// through the management interface to get/set value
// when the name of flag is supplied.
//
// A flag can be made "product_rw" only if
// - the VM implementation supports dynamic setting of the flag.
//   This implies that the VM must *always* query the flag variable
//   and not reuse state related to the flag state at any given time.
//
// Note that when there is a need to support develop flags to be writeable,
// it can be done in the same way as product_rw.
299255736Sdavidch 300255736Sdavidch#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \ 301255736Sdavidch \ 302255736Sdavidch lp64_product(bool, UseCompressedOops, false, \ 303255736Sdavidch "Use 32-bit object references in 64-bit VM. " \ 304255736Sdavidch "lp64_product means flag is always constant in 32 bit VM") \ 305255736Sdavidch \ 306255736Sdavidch lp64_product(bool, CheckCompressedOops, trueInDebug, \ 307255736Sdavidch "generate checks in encoding/decoding code") \ 308255736Sdavidch \ 309255736Sdavidch /* UseMembar is theoretically a temp flag used for memory barrier \ 310255736Sdavidch * removal testing. It was supposed to be removed before FCS but has \ 311255736Sdavidch * been re-added (see 6401008) */ \ 312255736Sdavidch product(bool, UseMembar, false, \ 313255736Sdavidch "(Unstable) Issues membars on thread state transitions") \ 314255736Sdavidch \ 315255736Sdavidch product(bool, PrintCommandLineFlags, false, \ 316255736Sdavidch "Prints flags that appeared on the command line") \ 317255736Sdavidch \ 318255736Sdavidch diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \ 319255736Sdavidch "Enable normal processing of flags relating to field diagnostics")\ 320255736Sdavidch \ 321255736Sdavidch experimental(bool, UnlockExperimentalVMOptions, false, \ 322255736Sdavidch "Enable normal processing of flags relating to experimental features")\ 323255736Sdavidch \ 324255736Sdavidch product(bool, JavaMonitorsInStackTrace, true, \ 325255736Sdavidch "Print info. 
about Java monitor locks when the stacks are dumped")\ 326255736Sdavidch \ 327255736Sdavidch product_pd(bool, UseLargePages, \ 328255736Sdavidch "Use large page memory") \ 329255736Sdavidch \ 330255736Sdavidch develop(bool, TracePageSizes, false, \ 331255736Sdavidch "Trace page size selection and usage.") \ 332255736Sdavidch \ 333255736Sdavidch product(bool, UseNUMA, false, \ 334255736Sdavidch "Use NUMA if available") \ 335255736Sdavidch \ 336255736Sdavidch product(intx, NUMAChunkResizeWeight, 20, \ 337255736Sdavidch "Percentage (0-100) used to weight the current sample when " \ 338255736Sdavidch "computing exponentially decaying average for " \ 339255736Sdavidch "AdaptiveNUMAChunkSizing") \ 340255736Sdavidch \ 341255736Sdavidch product(intx, NUMASpaceResizeRate, 1*G, \ 342255736Sdavidch "Do not reallocate more that this amount per collection") \ 343255736Sdavidch \ 344255736Sdavidch product(bool, UseAdaptiveNUMAChunkSizing, true, \ 345255736Sdavidch "Enable adaptive chunk sizing for NUMA") \ 346255736Sdavidch \ 347255736Sdavidch product(bool, NUMAStats, false, \ 348255736Sdavidch "Print NUMA stats in detailed heap information") \ 349255736Sdavidch \ 350255736Sdavidch product(intx, NUMAPageScanRate, 256, \ 351255736Sdavidch "Maximum number of pages to include in the page scan procedure") \ 352255736Sdavidch \ 353255736Sdavidch product_pd(bool, NeedsDeoptSuspend, \ 354255736Sdavidch "True for register window machines (sparc/ia64)") \ 355255736Sdavidch \ 356255736Sdavidch product(intx, UseSSE, 99, \ 357255736Sdavidch "Highest supported SSE instructions set on x86/x64") \ 358255736Sdavidch \ 359255736Sdavidch product(uintx, LargePageSizeInBytes, 0, \ 360255736Sdavidch "Large page size (0 to let VM choose the page size") \ 361255736Sdavidch \ 362255736Sdavidch product(uintx, LargePageHeapSizeThreshold, 128*M, \ 363255736Sdavidch "Use large pages if max heap is at least this big") \ 364255736Sdavidch \ 365255736Sdavidch product(bool, ForceTimeHighResolution, false, \ 
366255736Sdavidch "Using high time resolution(For Win32 only)") \ 367255736Sdavidch \ 368255736Sdavidch develop(bool, TraceItables, false, \ 369255736Sdavidch "Trace initialization and use of itables") \ 370255736Sdavidch \ 371255736Sdavidch develop(bool, TracePcPatching, false, \ 372255736Sdavidch "Trace usage of frame::patch_pc") \ 373255736Sdavidch \ 374255736Sdavidch develop(bool, TraceJumps, false, \ 375255736Sdavidch "Trace assembly jumps in thread ring buffer") \ 376255736Sdavidch \ 377255736Sdavidch develop(bool, TraceRelocator, false, \ 378255736Sdavidch "Trace the bytecode relocator") \ 379255736Sdavidch \ 380255736Sdavidch develop(bool, TraceLongCompiles, false, \ 381255736Sdavidch "Print out every time compilation is longer than " \ 382255736Sdavidch "a given threashold") \ 383255736Sdavidch \ 384255736Sdavidch develop(bool, SafepointALot, false, \ 385255736Sdavidch "Generates a lot of safepoints. Works with " \ 386255736Sdavidch "GuaranteedSafepointInterval") \ 387255736Sdavidch \ 388255736Sdavidch product_pd(bool, BackgroundCompilation, \ 389255736Sdavidch "A thread requesting compilation is not blocked during " \ 390255736Sdavidch "compilation") \ 391255736Sdavidch \ 392255736Sdavidch product(bool, PrintVMQWaitTime, false, \ 393255736Sdavidch "Prints out the waiting time in VM operation queue") \ 394255736Sdavidch \ 395255736Sdavidch develop(bool, BailoutToInterpreterForThrows, false, \ 396255736Sdavidch "Compiled methods which throws/catches exceptions will be " \ 397255736Sdavidch "deopt and intp.") \ 398255736Sdavidch \ 399255736Sdavidch develop(bool, NoYieldsInMicrolock, false, \ 400255736Sdavidch "Disable yields in microlock") \ 401255736Sdavidch \ 402255736Sdavidch develop(bool, TraceOopMapGeneration, false, \ 403255736Sdavidch "Shows oopmap generation") \ 404255736Sdavidch \ 405255736Sdavidch product(bool, MethodFlushing, true, \ 406255736Sdavidch "Reclamation of zombie and not-entrant methods") \ 407255736Sdavidch \ 408255736Sdavidch 
develop(bool, VerifyStack, false, \ 409255736Sdavidch "Verify stack of each thread when it is entering a runtime call") \ 410255736Sdavidch \ 411255736Sdavidch develop(bool, ForceUnreachable, false, \ 412255736Sdavidch "(amd64) Make all non code cache addresses to be unreachable with rip-rel forcing use of 64bit literal fixups") \ 413255736Sdavidch \ 414255736Sdavidch notproduct(bool, StressDerivedPointers, false, \ 415255736Sdavidch "Force scavenge when a derived pointers is detected on stack " \ 416255736Sdavidch "after rtm call") \ 417255736Sdavidch \ 418255736Sdavidch develop(bool, TraceDerivedPointers, false, \ 419255736Sdavidch "Trace traversal of derived pointers on stack") \ 420255736Sdavidch \ 421255736Sdavidch notproduct(bool, TraceCodeBlobStacks, false, \ 422255736Sdavidch "Trace stack-walk of codeblobs") \ 423255736Sdavidch \ 424255736Sdavidch product(bool, PrintJNIResolving, false, \ 425255736Sdavidch "Used to implement -v:jni") \ 426255736Sdavidch \ 427255736Sdavidch notproduct(bool, PrintRewrites, false, \ 428255736Sdavidch "Print methods that are being rewritten") \ 429255736Sdavidch \ 430255736Sdavidch product(bool, UseInlineCaches, true, \ 431255736Sdavidch "Use Inline Caches for virtual calls ") \ 432255736Sdavidch \ 433255736Sdavidch develop(bool, InlineArrayCopy, true, \ 434255736Sdavidch "inline arraycopy native that is known to be part of " \ 435255736Sdavidch "base library DLL") \ 436255736Sdavidch \ 437255736Sdavidch develop(bool, InlineObjectHash, true, \ 438255736Sdavidch "inline Object::hashCode() native that is known to be part " \ 439255736Sdavidch "of base library DLL") \ 440255736Sdavidch \ 441255736Sdavidch develop(bool, InlineObjectCopy, true, \ 442255736Sdavidch "inline Object.clone and Arrays.copyOf[Range] intrinsics") \ 443255736Sdavidch \ 444255736Sdavidch develop(bool, InlineNatives, true, \ 445255736Sdavidch "inline natives that are known to be part of base library DLL") \ 446255736Sdavidch \ 447255736Sdavidch develop(bool, 
InlineMathNatives, true, \ 448255736Sdavidch "inline SinD, CosD, etc.") \ 449255736Sdavidch \ 450255736Sdavidch develop(bool, InlineClassNatives, true, \ 451255736Sdavidch "inline Class.isInstance, etc") \ 452255736Sdavidch \ 453255736Sdavidch develop(bool, InlineAtomicLong, true, \ 454255736Sdavidch "inline sun.misc.AtomicLong") \ 455255736Sdavidch \ 456255736Sdavidch develop(bool, InlineThreadNatives, true, \ 457255736Sdavidch "inline Thread.currentThread, etc") \ 458255736Sdavidch \ 459255736Sdavidch develop(bool, InlineReflectionGetCallerClass, true, \ 460255736Sdavidch "inline sun.reflect.Reflection.getCallerClass(), known to be part "\ 461255736Sdavidch "of base library DLL") \ 462255736Sdavidch \ 463255736Sdavidch develop(bool, InlineUnsafeOps, true, \ 464255736Sdavidch "inline memory ops (native methods) from sun.misc.Unsafe") \ 465255736Sdavidch \ 466255736Sdavidch develop(bool, ConvertCmpD2CmpF, true, \ 467255736Sdavidch "Convert cmpD to cmpF when one input is constant in float range") \ 468255736Sdavidch \ 469255736Sdavidch develop(bool, ConvertFloat2IntClipping, true, \ 470255736Sdavidch "Convert float2int clipping idiom to integer clipping") \ 471255736Sdavidch \ 472255736Sdavidch develop(bool, SpecialStringCompareTo, true, \ 473255736Sdavidch "special version of string compareTo") \ 474255736Sdavidch \ 475255736Sdavidch develop(bool, SpecialStringIndexOf, true, \ 476255736Sdavidch "special version of string indexOf") \ 477255736Sdavidch \ 478255736Sdavidch product(bool, SpecialArraysEquals, false, \ 479258203Sedavis "special version of Arrays.equals(char[],char[])") \ 480284963Sdavidcs \ 481255736Sdavidch develop(bool, TraceCallFixup, false, \ 482255736Sdavidch "traces all call fixups") \ 483255736Sdavidch \ 484255736Sdavidch develop(bool, DeoptimizeALot, false, \ 485255736Sdavidch "deoptimize at every exit from the runtime system") \ 486255736Sdavidch \ 487255736Sdavidch develop(ccstrlist, DeoptimizeOnlyAt, "", \ 488255736Sdavidch "a comma separated 
list of bcis to deoptimize at") \ 489255736Sdavidch \ 490255736Sdavidch product(bool, DeoptimizeRandom, false, \ 491255736Sdavidch "deoptimize random frames on random exit from the runtime system")\ 492255736Sdavidch \ 493255736Sdavidch notproduct(bool, ZombieALot, false, \ 494255736Sdavidch "creates zombies (non-entrant) at exit from the runt. system") \ 495255736Sdavidch \ 496255736Sdavidch notproduct(bool, WalkStackALot, false, \ 497255736Sdavidch "trace stack (no print) at every exit from the runtime system") \ 498255736Sdavidch \ 499255736Sdavidch develop(bool, Debugging, false, \ 500255736Sdavidch "set when executing debug methods in debug.ccp " \ 501255736Sdavidch "(to prevent triggering assertions)") \ 502255736Sdavidch \ 503255736Sdavidch notproduct(bool, StrictSafepointChecks, trueInDebug, \ 504255736Sdavidch "Enable strict checks that safepoints cannot happen for threads " \ 505255736Sdavidch "that used No_Safepoint_Verifier") \ 506255736Sdavidch \ 507255736Sdavidch notproduct(bool, VerifyLastFrame, false, \ 508255736Sdavidch "Verify oops on last frame on entry to VM") \ 509255736Sdavidch \ 510255736Sdavidch develop(bool, TraceHandleAllocation, false, \ 511255736Sdavidch "Prints out warnings when suspicious many handles are allocated") \ 512255736Sdavidch \ 513255736Sdavidch product(bool, UseCompilerSafepoints, true, \ 514255736Sdavidch "Stop at safepoints in compiled code") \ 515255736Sdavidch \ 516255736Sdavidch product(bool, UseSplitVerifier, true, \ 517255736Sdavidch "use split verifier with StackMapTable attributes") \ 518255736Sdavidch \ 519255736Sdavidch product(bool, FailOverToOldVerifier, true, \ 520255736Sdavidch "fail over to old verifier when split verifier fails") \ 521255736Sdavidch \ 522255736Sdavidch develop(bool, ShowSafepointMsgs, false, \ 523255736Sdavidch "Show msg. 
about safepoint synch.") \ 524255736Sdavidch \ 525255736Sdavidch product(bool, SafepointTimeout, false, \ 526255736Sdavidch "Time out and warn or fail after SafepointTimeoutDelay " \ 527255736Sdavidch "milliseconds if failed to reach safepoint") \ 528255736Sdavidch \ 529255736Sdavidch develop(bool, DieOnSafepointTimeout, false, \ 530255736Sdavidch "Die upon failure to reach safepoint (see SafepointTimeout)") \ 531255736Sdavidch \ 532255736Sdavidch /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \ 533255736Sdavidch /* typically, at most a few retries are needed */ \ 534255736Sdavidch product(intx, SuspendRetryCount, 50, \ 535255736Sdavidch "Maximum retry count for an external suspend request") \ 536255736Sdavidch \ 537255736Sdavidch product(intx, SuspendRetryDelay, 5, \ 538255736Sdavidch "Milliseconds to delay per retry (* current_retry_count)") \ 539255736Sdavidch \ 540255736Sdavidch product(bool, AssertOnSuspendWaitFailure, false, \ 541255736Sdavidch "Assert/Guarantee on external suspend wait failure") \ 542255736Sdavidch \ 543255736Sdavidch product(bool, TraceSuspendWaitFailures, false, \ 544255736Sdavidch "Trace external suspend wait failures") \ 545255736Sdavidch \ 546255736Sdavidch product(bool, MaxFDLimit, true, \ 547255736Sdavidch "Bump the number of file descriptors to max in solaris.") \ 548255736Sdavidch \ 549255736Sdavidch notproduct(bool, LogEvents, trueInDebug, \ 550255736Sdavidch "Enable Event log") \ 551255736Sdavidch \ 552255736Sdavidch product(bool, BytecodeVerificationRemote, true, \ 553255736Sdavidch "Enables the Java bytecode verifier for remote classes") \ 554255736Sdavidch \ 555255736Sdavidch product(bool, BytecodeVerificationLocal, false, \ 556255736Sdavidch "Enables the Java bytecode verifier for local classes") \ 557255736Sdavidch \ 558255736Sdavidch develop(bool, ForceFloatExceptions, trueInDebug, \ 559255736Sdavidch "Force exceptions on FP stack under/overflow") \ 560255736Sdavidch \ 561255736Sdavidch develop(bool, 
SoftMatchFailure, trueInProduct, \ 562255736Sdavidch "If the DFA fails to match a node, print a message and bail out") \ 563255736Sdavidch \ 564255736Sdavidch develop(bool, VerifyStackAtCalls, false, \ 565255736Sdavidch "Verify that the stack pointer is unchanged after calls") \ 566255736Sdavidch \ 567255736Sdavidch develop(bool, TraceJavaAssertions, false, \ 568255736Sdavidch "Trace java language assertions") \ 569255736Sdavidch \ 570255736Sdavidch notproduct(bool, CheckAssertionStatusDirectives, false, \ 571255736Sdavidch "temporary - see javaClasses.cpp") \ 572255736Sdavidch \ 573255736Sdavidch notproduct(bool, PrintMallocFree, false, \ 574255736Sdavidch "Trace calls to C heap malloc/free allocation") \ 575255736Sdavidch \ 576255736Sdavidch notproduct(bool, PrintOopAddress, false, \ 577255736Sdavidch "Always print the location of the oop") \ 578255736Sdavidch \ 579255736Sdavidch notproduct(bool, VerifyCodeCacheOften, false, \ 580255736Sdavidch "Verify compiled-code cache often") \ 581255736Sdavidch \ 582255736Sdavidch develop(bool, ZapDeadCompiledLocals, false, \ 583255736Sdavidch "Zap dead locals in compiler frames") \ 584255736Sdavidch \ 585255736Sdavidch notproduct(bool, ZapDeadLocalsOld, false, \ 586255736Sdavidch "Zap dead locals (old version, zaps all frames when " \ 587255736Sdavidch "entering the VM") \ 588255736Sdavidch \ 589255736Sdavidch notproduct(bool, CheckOopishValues, false, \ 590255736Sdavidch "Warn if value contains oop ( requires ZapDeadLocals)") \ 591255736Sdavidch \ 592255736Sdavidch develop(bool, UseMallocOnly, false, \ 593255736Sdavidch "use only malloc/free for allocation (no resource area/arena)") \ 594255736Sdavidch \ 595255736Sdavidch develop(bool, PrintMalloc, false, \ 596255736Sdavidch "print all malloc/free calls") \ 597255736Sdavidch \ 598255736Sdavidch develop(bool, ZapResourceArea, trueInDebug, \ 599255736Sdavidch "Zap freed resource/arena space with 0xABABABAB") \ 600255736Sdavidch \ 601255736Sdavidch notproduct(bool, 
ZapVMHandleArea, trueInDebug, \ 602255736Sdavidch "Zap freed VM handle space with 0xBCBCBCBC") \ 603255736Sdavidch \ 604255736Sdavidch develop(bool, ZapJNIHandleArea, trueInDebug, \ 605255736Sdavidch "Zap freed JNI handle space with 0xFEFEFEFE") \ 606255736Sdavidch \ 607255736Sdavidch develop(bool, ZapUnusedHeapArea, trueInDebug, \ 608255736Sdavidch "Zap unused heap space with 0xBAADBABE") \ 609255736Sdavidch \ 610255736Sdavidch develop(bool, TraceZapUnusedHeapArea, false, \ 611255736Sdavidch "Trace zapping of unused heap space") \ 612255736Sdavidch \ 613255736Sdavidch develop(bool, CheckZapUnusedHeapArea, false, \ 614255736Sdavidch "Check zapping of unused heap space") \ 615255736Sdavidch \ 616255736Sdavidch develop(bool, PrintVMMessages, true, \ 617255736Sdavidch "Print vm messages on console") \ 618255736Sdavidch \ 619255736Sdavidch product(bool, PrintGCApplicationConcurrentTime, false, \ 620255736Sdavidch "Print the time the application has been running") \ 621255736Sdavidch \ 622255736Sdavidch product(bool, PrintGCApplicationStoppedTime, false, \ 623255736Sdavidch "Print the time the application has been stopped") \ 624255736Sdavidch \ 625255736Sdavidch develop(bool, Verbose, false, \ 626255736Sdavidch "Prints additional debugging information from other modes") \ 627255736Sdavidch \ 628255736Sdavidch develop(bool, PrintMiscellaneous, false, \ 629255736Sdavidch "Prints uncategorized debugging information (requires +Verbose)") \ 630255736Sdavidch \ 631255736Sdavidch develop(bool, WizardMode, false, \ 632255736Sdavidch "Prints much more debugging information") \ 633255736Sdavidch \ 634255736Sdavidch product(bool, ShowMessageBoxOnError, false, \ 635255736Sdavidch "Keep process alive on VM fatal error") \ 636255736Sdavidch \ 637255736Sdavidch product_pd(bool, UseOSErrorReporting, \ 638255736Sdavidch "Let VM fatal error propagate to the OS (ie. 
WER on Windows)") \ 639255736Sdavidch \ 640255736Sdavidch product(bool, SuppressFatalErrorMessage, false, \ 641255736Sdavidch "Do NO Fatal Error report [Avoid deadlock]") \ 642255736Sdavidch \ 643255736Sdavidch product(ccstrlist, OnError, "", \ 644255736Sdavidch "Run user-defined commands on fatal error; see VMError.cpp " \ 645255736Sdavidch "for examples") \ 646255736Sdavidch \ 647255736Sdavidch product(ccstrlist, OnOutOfMemoryError, "", \ 648255736Sdavidch "Run user-defined commands on first java.lang.OutOfMemoryError") \ 649255736Sdavidch \ 650255736Sdavidch manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 651255736Sdavidch "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 652255736Sdavidch \ 653255736Sdavidch manageable(ccstr, HeapDumpPath, NULL, \ 654255736Sdavidch "When HeapDumpOnOutOfMemoryError is on, the path (filename or" \ 655258203Sedavis "directory) of the dump file (defaults to java_pid<pid>.hprof" \ 656258203Sedavis "in the working directory)") \ 657255736Sdavidch \ 658255736Sdavidch develop(uintx, SegmentedHeapDumpThreshold, 2*G, \ 659255736Sdavidch "Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) " \ 660255736Sdavidch "when the heap usage is larger than this") \ 661255736Sdavidch \ 662255736Sdavidch develop(uintx, HeapDumpSegmentSize, 1*G, \ 663255736Sdavidch "Approximate segment size when generating a segmented heap dump") \ 664255736Sdavidch \ 665255736Sdavidch develop(bool, BreakAtWarning, false, \ 666255736Sdavidch "Execute breakpoint upon encountering VM warning") \ 667255736Sdavidch \ 668255736Sdavidch product_pd(bool, UseVectoredExceptions, \ 669258203Sedavis "Temp Flag - Use Vectored Exceptions rather than SEH (Windows Only)") \ 670255736Sdavidch \ 671255736Sdavidch develop(bool, TraceVMOperation, false, \ 672255736Sdavidch "Trace vm operations") \ 673255736Sdavidch \ 674255736Sdavidch develop(bool, UseFakeTimers, false, \ 675255736Sdavidch "Tells whether the VM should use system time or a fake timer") \ 
676255736Sdavidch \ 677255736Sdavidch diagnostic(bool, LogCompilation, false, \ 678255736Sdavidch "Log compilation activity in detail to hotspot.log or LogFile") \ 679255736Sdavidch \ 680255736Sdavidch product(bool, PrintCompilation, false, \ 681255736Sdavidch "Print compilations") \ 682255736Sdavidch \ 683255736Sdavidch diagnostic(bool, TraceNMethodInstalls, false, \ 684255736Sdavidch "Trace nmethod intallation") \ 685255736Sdavidch \ 686255736Sdavidch diagnostic(bool, TraceOSRBreakpoint, false, \ 687255736Sdavidch "Trace OSR Breakpoint ") \ 688255736Sdavidch \ 689255736Sdavidch diagnostic(bool, TraceCompileTriggered, false, \ 690255736Sdavidch "Trace compile triggered") \ 691255736Sdavidch \ 692255736Sdavidch diagnostic(bool, TraceTriggers, false, \ 693255736Sdavidch "Trace triggers") \ 694255736Sdavidch \ 695255736Sdavidch product(bool, AlwaysRestoreFPU, false, \ 696255736Sdavidch "Restore the FPU control word after every JNI call (expensive)") \ 697255736Sdavidch \ 698255736Sdavidch notproduct(bool, PrintCompilation2, false, \ 699255736Sdavidch "Print additional statistics per compilation") \ 700255736Sdavidch \ 701255736Sdavidch diagnostic(bool, PrintAdapterHandlers, false, \ 702255736Sdavidch "Print code generated for i2c/c2i adapters") \ 703255736Sdavidch \ 704255736Sdavidch diagnostic(bool, PrintAssembly, false, \ 705255736Sdavidch "Print assembly code (using external disassembler.so)") \ 706255736Sdavidch \ 707255736Sdavidch diagnostic(ccstr, PrintAssemblyOptions, false, \ 708255736Sdavidch "Options string passed to disassembler.so") \ 709255736Sdavidch \ 710255736Sdavidch diagnostic(bool, PrintNMethods, false, \ 711255736Sdavidch "Print assembly code for nmethods when generated") \ 712255736Sdavidch \ 713255736Sdavidch diagnostic(bool, PrintNativeNMethods, false, \ 714255736Sdavidch "Print assembly code for native nmethods when generated") \ 715255736Sdavidch \ 716255736Sdavidch develop(bool, PrintDebugInfo, false, \ 717255736Sdavidch "Print debug 
information for all nmethods when generated") \ 718255736Sdavidch \ 719255736Sdavidch develop(bool, PrintRelocations, false, \ 720255736Sdavidch "Print relocation information for all nmethods when generated") \ 721255736Sdavidch \ 722255736Sdavidch develop(bool, PrintDependencies, false, \ 723255736Sdavidch "Print dependency information for all nmethods when generated") \ 724255736Sdavidch \ 725255736Sdavidch develop(bool, PrintExceptionHandlers, false, \ 726255736Sdavidch "Print exception handler tables for all nmethods when generated") \ 727255736Sdavidch \ 728255736Sdavidch develop(bool, InterceptOSException, false, \ 729255736Sdavidch "Starts debugger when an implicit OS (e.g., NULL) " \ 730255736Sdavidch "exception happens") \ 731255736Sdavidch \ 732255736Sdavidch notproduct(bool, PrintCodeCache, false, \ 733255736Sdavidch "Print the compiled_code cache when exiting") \ 734255736Sdavidch \ 735255736Sdavidch develop(bool, PrintCodeCache2, false, \ 736255736Sdavidch "Print detailed info on the compiled_code cache when exiting") \ 737255736Sdavidch \ 738255736Sdavidch diagnostic(bool, PrintStubCode, false, \ 739255736Sdavidch "Print generated stub code") \ 740255736Sdavidch \ 741255736Sdavidch product(bool, StackTraceInThrowable, true, \ 742296580Sdavidcs "Collect backtrace in throwable when exception happens") \ 743296580Sdavidcs \ 744296580Sdavidcs product(bool, OmitStackTraceInFastThrow, true, \ 745296580Sdavidcs "Omit backtraces for some 'hot' exceptions in optimized code") \ 746296580Sdavidcs \ 747296580Sdavidcs product(bool, ProfilerPrintByteCodeStatistics, false, \ 748296580Sdavidcs "Prints byte code statictics when dumping profiler output") \ 749296580Sdavidcs \ 750296580Sdavidcs product(bool, ProfilerRecordPC, false, \ 751296580Sdavidcs "Collects tick for each 16 byte interval of compiled code") \ 752296580Sdavidcs \ 753296580Sdavidcs product(bool, ProfileVM, false, \ 754296580Sdavidcs "Profiles ticks that fall within VM (either in the VM Thread " \ 
755296580Sdavidcs "or VM code called through stubs)") \ 756296580Sdavidcs \ 757296580Sdavidcs product(bool, ProfileIntervals, false, \ 758296580Sdavidcs "Prints profiles for each interval (see ProfileIntervalsTicks)") \ 759296580Sdavidcs \ 760296580Sdavidcs notproduct(bool, ProfilerCheckIntervals, false, \ 761296580Sdavidcs "Collect and print info on spacing of profiler ticks") \ 762255736Sdavidch \ 763255736Sdavidch develop(bool, PrintJVMWarnings, false, \ 764255736Sdavidch "Prints warnings for unimplemented JVM functions") \ 765255736Sdavidch \ 766255736Sdavidch notproduct(uintx, WarnOnStalledSpinLock, 0, \ 767255736Sdavidch "Prints warnings for stalled SpinLocks") \ 768255736Sdavidch \ 769255736Sdavidch develop(bool, InitializeJavaLangSystem, true, \ 770255736Sdavidch "Initialize java.lang.System - turn off for individual " \ 771255736Sdavidch "method debugging") \ 772255736Sdavidch \ 773255736Sdavidch develop(bool, InitializeJavaLangString, true, \ 774255736Sdavidch "Initialize java.lang.String - turn off for individual " \ 775255736Sdavidch "method debugging") \ 776255736Sdavidch \ 777255736Sdavidch develop(bool, InitializeJavaLangExceptionsErrors, true, \ 778255736Sdavidch "Initialize various error and exception classes - turn off for " \ 779255736Sdavidch "individual method debugging") \ 780255736Sdavidch \ 781255736Sdavidch product(bool, RegisterFinalizersAtInit, true, \ 782255736Sdavidch "Register finalizable objects at end of Object.<init> or " \ 783255736Sdavidch "after allocation.") \ 784255736Sdavidch \ 785255736Sdavidch develop(bool, RegisterReferences, true, \ 786255736Sdavidch "Tells whether the VM should register soft/weak/final/phantom " \ 787255736Sdavidch "references") \ 788255736Sdavidch \ 789255736Sdavidch develop(bool, IgnoreRewrites, false, \ 790255736Sdavidch "Supress rewrites of bytecodes in the oopmap generator. 
" \ 791255736Sdavidch "This is unsafe!") \ 792255736Sdavidch \ 793255736Sdavidch develop(bool, PrintCodeCacheExtension, false, \ 794255736Sdavidch "Print extension of code cache") \ 795255736Sdavidch \ 796255736Sdavidch develop(bool, UsePrivilegedStack, true, \ 797255736Sdavidch "Enable the security JVM functions") \ 798255736Sdavidch \ 799255736Sdavidch develop(bool, IEEEPrecision, true, \ 800255736Sdavidch "Enables IEEE precision (for INTEL only)") \ 801255736Sdavidch \ 802255736Sdavidch develop(bool, ProtectionDomainVerification, true, \ 803255736Sdavidch "Verifies protection domain before resolution in system " \ 804255736Sdavidch "dictionary") \ 805255736Sdavidch \ 806255736Sdavidch product(bool, ClassUnloading, true, \ 807255736Sdavidch "Do unloading of classes") \ 808255736Sdavidch \ 809255736Sdavidch diagnostic(bool, LinkWellKnownClasses, true, \ 810255736Sdavidch "Resolve a well known class as soon as its name is seen") \ 811255736Sdavidch \ 812255736Sdavidch develop(bool, DisableStartThread, false, \ 813255736Sdavidch "Disable starting of additional Java threads " \ 814255736Sdavidch "(for debugging only)") \ 815255736Sdavidch \ 816255736Sdavidch develop(bool, MemProfiling, false, \ 817255736Sdavidch "Write memory usage profiling to log file") \ 818255736Sdavidch \ 819255736Sdavidch notproduct(bool, PrintSystemDictionaryAtExit, false, \ 820296580Sdavidcs "Prints the system dictionary at exit") \ 821296580Sdavidcs \ 822296580Sdavidcs diagnostic(bool, UnsyncloadClass, false, \ 823296580Sdavidcs "Unstable: VM calls loadClass unsynchronized. 
Custom classloader "\ 824296580Sdavidcs "must call VM synchronized for findClass & defineClass") \ 825296580Sdavidcs \ 826296580Sdavidcs product_pd(bool, DontYieldALot, \ 827296580Sdavidcs "Throw away obvious excess yield calls (for SOLARIS only)") \ 828296580Sdavidcs \ 829296580Sdavidcs product_pd(bool, ConvertSleepToYield, \ 830296580Sdavidcs "Converts sleep(0) to thread yield " \ 831296580Sdavidcs "(may be off for SOLARIS to improve GUI)") \ 832296580Sdavidcs \ 833296580Sdavidcs product(bool, ConvertYieldToSleep, false, \ 834296580Sdavidcs "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\ 835296580Sdavidcs "behavior (SOLARIS only)") \ 836296580Sdavidcs \ 837296580Sdavidcs product(bool, UseBoundThreads, true, \ 838296580Sdavidcs "Bind user level threads to kernel threads (for SOLARIS only)") \ 839296580Sdavidcs \ 840296580Sdavidcs develop(bool, UseDetachedThreads, true, \ 841296580Sdavidcs "Use detached threads that are recycled upon termination " \ 842255736Sdavidch "(for SOLARIS only)") \ 843255736Sdavidch \ 844255736Sdavidch product(bool, UseLWPSynchronization, true, \ 845255736Sdavidch "Use LWP-based instead of libthread-based synchronization " \ 846255736Sdavidch "(SPARC only)") \ 847255736Sdavidch \ 848255736Sdavidch product(ccstr, SyncKnobs, "", \ 849255736Sdavidch "(Unstable) Various monitor synchronization tunables") \ 850255736Sdavidch \ 851255736Sdavidch product(intx, EmitSync, 0, \ 852255736Sdavidch "(Unsafe,Unstable) " \ 853255736Sdavidch " Controls emission of inline sync fast-path code") \ 854255736Sdavidch \ 855255736Sdavidch product(intx, AlwaysInflate, 0, "(Unstable) Force inflation") \ 856255736Sdavidch \ 857255736Sdavidch product(intx, Atomics, 0, \ 858255736Sdavidch "(Unsafe,Unstable) Diagnostic - Controls emission of atomics") \ 859255736Sdavidch \ 860255736Sdavidch product(intx, FenceInstruction, 0, \ 861255736Sdavidch "(Unsafe,Unstable) Experimental") \ 862255736Sdavidch \ 863255736Sdavidch product(intx, SyncFlags, 0, 
"(Unsafe,Unstable) Experimental Sync flags" ) \ 864255736Sdavidch \ 865255736Sdavidch product(intx, SyncVerbose, 0, "(Unstable)" ) \ 866255736Sdavidch \ 867255736Sdavidch product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \ 868255736Sdavidch \ 869255736Sdavidch product(intx, hashCode, 0, \ 870255736Sdavidch "(Unstable) select hashCode generation algorithm" ) \ 871255736Sdavidch \ 872255736Sdavidch product(intx, WorkAroundNPTLTimedWaitHang, 1, \ 873255736Sdavidch "(Unstable, Linux-specific)" \ 874255736Sdavidch " avoid NPTL-FUTEX hang pthread_cond_timedwait" ) \ 875255736Sdavidch \ 876255736Sdavidch product(bool, FilterSpuriousWakeups , true, \ 877255736Sdavidch "Prevent spurious or premature wakeups from object.wait" \ 878255736Sdavidch "(Solaris only)") \ 879255736Sdavidch \ 880255736Sdavidch product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \ 881255736Sdavidch product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \ 882255736Sdavidch product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \ 883255736Sdavidch \ 884255736Sdavidch develop(bool, UsePthreads, false, \ 885255736Sdavidch "Use pthread-based instead of libthread-based synchronization " \ 886255736Sdavidch "(SPARC only)") \ 887255736Sdavidch \ 888255736Sdavidch product(bool, AdjustConcurrency, false, \ 889255736Sdavidch "call thr_setconcurrency at thread create time to avoid " \ 890255736Sdavidch "LWP starvation on MP systems (For Solaris Only)") \ 891255736Sdavidch \ 892255736Sdavidch develop(bool, UpdateHotSpotCompilerFileOnError, true, \ 893255736Sdavidch "Should the system attempt to update the compiler file when " \ 894255736Sdavidch "an error occurs?") \ 895255736Sdavidch \ 896255736Sdavidch product(bool, ReduceSignalUsage, false, \ 897255736Sdavidch "Reduce the use of OS signals in Java and/or the VM") \ 898255736Sdavidch \ 899255736Sdavidch notproduct(bool, ValidateMarkSweep, false, \ 900255736Sdavidch "Do extra validation during MarkSweep collection") \ 901255736Sdavidch \ 902255736Sdavidch 
notproduct(bool, RecordMarkSweepCompaction, false, \ 903255736Sdavidch "Enable GC-to-GC recording and querying of compaction during " \ 904255736Sdavidch "MarkSweep") \ 905255736Sdavidch \ 906255736Sdavidch develop_pd(bool, ShareVtableStubs, \ 907255736Sdavidch "Share vtable stubs (smaller code but worse branch prediction") \ 908255736Sdavidch \ 909255736Sdavidch develop(bool, LoadLineNumberTables, true, \ 910255736Sdavidch "Tells whether the class file parser loads line number tables") \ 911255736Sdavidch \ 912255736Sdavidch develop(bool, LoadLocalVariableTables, true, \ 913255736Sdavidch "Tells whether the class file parser loads local variable tables")\ 914255736Sdavidch \ 915255736Sdavidch develop(bool, LoadLocalVariableTypeTables, true, \ 916255736Sdavidch "Tells whether the class file parser loads local variable type tables")\ 917255736Sdavidch \ 918255736Sdavidch product(bool, AllowUserSignalHandlers, false, \ 919255736Sdavidch "Do not complain if the application installs signal handlers " \ 920255736Sdavidch "(Solaris & Linux only)") \ 921255736Sdavidch \ 922255736Sdavidch product(bool, UseSignalChaining, true, \ 923255736Sdavidch "Use signal-chaining to invoke signal handlers installed " \ 924255736Sdavidch "by the application (Solaris & Linux only)") \ 925255736Sdavidch \ 926255736Sdavidch product(bool, UseAltSigs, false, \ 927255736Sdavidch "Use alternate signals instead of SIGUSR1 & SIGUSR2 for VM " \ 928255736Sdavidch "internal signals. 
(Solaris only)") \ 929255736Sdavidch \ 930255736Sdavidch product(bool, UseSpinning, false, \ 931255736Sdavidch "Use spinning in monitor inflation and before entry") \ 932255736Sdavidch \ 933255736Sdavidch product(bool, PreSpinYield, false, \ 934255736Sdavidch "Yield before inner spinning loop") \ 935255736Sdavidch \ 936255736Sdavidch product(bool, PostSpinYield, true, \ 937255736Sdavidch "Yield after inner spinning loop") \ 938255736Sdavidch \ 939255736Sdavidch product(bool, AllowJNIEnvProxy, false, \ 940255736Sdavidch "Allow JNIEnv proxies for jdbx") \ 941255736Sdavidch \ 942255736Sdavidch product(bool, JNIDetachReleasesMonitors, true, \ 943255736Sdavidch "JNI DetachCurrentThread releases monitors owned by thread") \ 944255736Sdavidch \ 945255736Sdavidch product(bool, RestoreMXCSROnJNICalls, false, \ 946255736Sdavidch "Restore MXCSR when returning from JNI calls") \ 947255736Sdavidch \ 948255736Sdavidch product(bool, CheckJNICalls, false, \ 949255736Sdavidch "Verify all arguments to JNI calls") \ 950255736Sdavidch \ 951255736Sdavidch product(bool, UseFastJNIAccessors, true, \ 952255736Sdavidch "Use optimized versions of Get<Primitive>Field") \ 953255736Sdavidch \ 954255736Sdavidch product(bool, EagerXrunInit, false, \ 955255736Sdavidch "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 956255736Sdavidch " but not all -Xrun libraries may support the state of the VM at this time") \ 957255736Sdavidch \ 958255736Sdavidch product(bool, PreserveAllAnnotations, false, \ 959255736Sdavidch "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \ 960255736Sdavidch \ 961255736Sdavidch develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 962255736Sdavidch "Number of OutOfMemoryErrors preallocated with backtrace") \ 963255736Sdavidch \ 964255736Sdavidch product(bool, LazyBootClassLoader, true, \ 965255736Sdavidch "Enable/disable lazy opening of boot class path entries") \ 966255736Sdavidch \ 967255736Sdavidch diagnostic(bool, UseIncDec, 
true, \ 968255736Sdavidch "Use INC, DEC instructions on x86") \ 969255736Sdavidch \ 970255736Sdavidch product(bool, UseNewLongLShift, false, \ 971255736Sdavidch "Use optimized bitwise shift left") \ 972255736Sdavidch \ 973255736Sdavidch product(bool, UseStoreImmI16, true, \ 974255736Sdavidch "Use store immediate 16-bits value instruction on x86") \ 975255736Sdavidch \ 976255736Sdavidch product(bool, UseAddressNop, false, \ 977255736Sdavidch "Use '0F 1F [addr]' NOP instructions on x86 cpus") \ 978255736Sdavidch \ 979255736Sdavidch product(bool, UseXmmLoadAndClearUpper, true, \ 980255736Sdavidch "Load low part of XMM register and clear upper part") \ 981255736Sdavidch \ 982255736Sdavidch product(bool, UseXmmRegToRegMoveAll, false, \ 983255736Sdavidch "Copy all XMM register bits when moving value between registers") \ 984255736Sdavidch \ 985255736Sdavidch product(bool, UseXmmI2D, false, \ 986255736Sdavidch "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \ 987255736Sdavidch \ 988255736Sdavidch product(bool, UseXmmI2F, false, \ 989255736Sdavidch "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \ 990255736Sdavidch \ 991255736Sdavidch product(intx, FieldsAllocationStyle, 1, \ 992255736Sdavidch "0 - type based with oops first, 1 - with oops last") \ 993255736Sdavidch \ 994255736Sdavidch product(bool, CompactFields, true, \ 995255736Sdavidch "Allocate nonstatic fields in gaps between previous fields") \ 996255736Sdavidch \ 997255736Sdavidch notproduct(bool, PrintCompactFieldsSavings, false, \ 998255736Sdavidch "Print how many words were saved with CompactFields") \ 999255736Sdavidch \ 1000255736Sdavidch product(bool, UseBiasedLocking, true, \ 1001255736Sdavidch "Enable biased locking in JVM") \ 1002255736Sdavidch \ 1003255736Sdavidch product(intx, BiasedLockingStartupDelay, 4000, \ 1004255736Sdavidch "Number of milliseconds to wait before enabling biased locking") \ 1005255736Sdavidch \ 1006255736Sdavidch diagnostic(bool, 
PrintBiasedLockingStatistics, false, \ 1007255736Sdavidch "Print statistics of biased locking in JVM") \ 1008255736Sdavidch \ 1009255736Sdavidch product(intx, BiasedLockingBulkRebiasThreshold, 20, \ 1010255736Sdavidch "Threshold of number of revocations per type to try to " \ 1011255736Sdavidch "rebias all objects in the heap of that type") \ 1012255736Sdavidch \ 1013255736Sdavidch product(intx, BiasedLockingBulkRevokeThreshold, 40, \ 1014255736Sdavidch "Threshold of number of revocations per type to permanently " \ 1015255736Sdavidch "revoke biases of all objects in the heap of that type") \ 1016255736Sdavidch \ 1017255736Sdavidch product(intx, BiasedLockingDecayTime, 25000, \ 1018255736Sdavidch "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ 1019255736Sdavidch "type after previous bulk rebias") \ 1020255736Sdavidch \ 1021255736Sdavidch /* tracing */ \ 1022255736Sdavidch \ 1023255736Sdavidch notproduct(bool, TraceRuntimeCalls, false, \ 1024255736Sdavidch "Trace run-time calls") \ 1025255736Sdavidch \ 1026255736Sdavidch develop(bool, TraceJNICalls, false, \ 1027255736Sdavidch "Trace JNI calls") \ 1028255736Sdavidch \ 1029255736Sdavidch notproduct(bool, TraceJVMCalls, false, \ 1030296580Sdavidcs "Trace JVM calls") \ 1031255736Sdavidch \ 1032255736Sdavidch product(ccstr, TraceJVMTI, "", \ 1033255736Sdavidch "Trace flags for JVMTI functions and events") \ 1034255736Sdavidch \ 1035255736Sdavidch /* This option can change an EMCP method into an obsolete method. */ \ 1036255736Sdavidch /* This can affect tests that except specific methods to be EMCP. */ \ 1037255736Sdavidch /* This option should be used with caution. 
*/ \ 1038255736Sdavidch product(bool, StressLdcRewrite, false, \ 1039255736Sdavidch "Force ldc -> ldc_w rewrite during RedefineClasses") \ 1040255736Sdavidch \ 1041255736Sdavidch product(intx, TraceRedefineClasses, 0, \ 1042255736Sdavidch "Trace level for JVMTI RedefineClasses") \ 1043255736Sdavidch \ 1044255736Sdavidch /* change to false by default sometime after Mustang */ \ 1045255736Sdavidch product(bool, VerifyMergedCPBytecodes, true, \ 1046255736Sdavidch "Verify bytecodes after RedefineClasses constant pool merging") \ 1047255736Sdavidch \ 1048296580Sdavidcs develop(bool, TraceJNIHandleAllocation, false, \ 1049296580Sdavidcs "Trace allocation/deallocation of JNI handle blocks") \ 1050255736Sdavidch \ 1051255736Sdavidch develop(bool, TraceThreadEvents, false, \ 1052255736Sdavidch "Trace all thread events") \ 1053255736Sdavidch \ 1054255736Sdavidch develop(bool, TraceBytecodes, false, \ 1055255736Sdavidch "Trace bytecode execution") \ 1056255736Sdavidch \ 1057255736Sdavidch develop(bool, TraceClassInitialization, false, \ 1058255736Sdavidch "Trace class initialization") \ 1059255736Sdavidch \ 1060255736Sdavidch develop(bool, TraceExceptions, false, \ 1061255736Sdavidch "Trace exceptions") \ 1062255736Sdavidch \ 1063255736Sdavidch develop(bool, TraceICs, false, \ 1064255736Sdavidch "Trace inline cache changes") \ 1065255736Sdavidch \ 1066255736Sdavidch notproduct(bool, TraceInvocationCounterOverflow, false, \ 1067255736Sdavidch "Trace method invocation counter overflow") \ 1068255736Sdavidch \ 1069255736Sdavidch develop(bool, TraceInlineCacheClearing, false, \ 1070255736Sdavidch "Trace clearing of inline caches in nmethods") \ 1071255736Sdavidch \ 1072255736Sdavidch develop(bool, TraceDependencies, false, \ 1073255736Sdavidch "Trace dependencies") \ 1074255736Sdavidch \ 1075255736Sdavidch develop(bool, VerifyDependencies, trueInDebug, \ 1076255736Sdavidch "Exercise and verify the compilation dependency mechanism") \ 1077255736Sdavidch \ 1078255736Sdavidch 
develop(bool, TraceNewOopMapGeneration, false, \ 1079255736Sdavidch "Trace OopMapGeneration") \ 1080255736Sdavidch \ 1081255736Sdavidch develop(bool, TraceNewOopMapGenerationDetailed, false, \ 1082255736Sdavidch "Trace OopMapGeneration: print detailed cell states") \ 1083255736Sdavidch \ 1084255736Sdavidch develop(bool, TimeOopMap, false, \ 1085255736Sdavidch "Time calls to GenerateOopMap::compute_map() in sum") \ 1086255736Sdavidch \ 1087255736Sdavidch develop(bool, TimeOopMap2, false, \ 1088255736Sdavidch "Time calls to GenerateOopMap::compute_map() individually") \ 1089255736Sdavidch \ 1090255736Sdavidch develop(bool, TraceMonitorMismatch, false, \ 1091255736Sdavidch "Trace monitor matching failures during OopMapGeneration") \ 1092255736Sdavidch \ 1093255736Sdavidch develop(bool, TraceOopMapRewrites, false, \ 1094255736Sdavidch "Trace rewritting of method oops during oop map generation") \ 1095255736Sdavidch \ 1096255736Sdavidch develop(bool, TraceSafepoint, false, \ 1097255736Sdavidch "Trace safepoint operations") \ 1098255736Sdavidch \ 1099255736Sdavidch develop(bool, TraceICBuffer, false, \ 1100255736Sdavidch "Trace usage of IC buffer") \ 1101255736Sdavidch \ 1102255736Sdavidch develop(bool, TraceCompiledIC, false, \ 1103255736Sdavidch "Trace changes of compiled IC") \ 1104255736Sdavidch \ 1105255736Sdavidch notproduct(bool, TraceZapDeadLocals, false, \ 1106255736Sdavidch "Trace zapping dead locals") \ 1107255736Sdavidch \ 1108255736Sdavidch develop(bool, TraceStartupTime, false, \ 1109255736Sdavidch "Trace setup time") \ 1110255736Sdavidch \ 1111255736Sdavidch develop(bool, TraceHPI, false, \ 1112255736Sdavidch "Trace Host Porting Interface (HPI)") \ 1113255736Sdavidch \ 1114255736Sdavidch product(ccstr, HPILibPath, NULL, \ 1115255736Sdavidch "Specify alternate path to HPI library") \ 1116255736Sdavidch \ 1117255736Sdavidch develop(bool, TraceProtectionDomainVerification, false, \ 1118255736Sdavidch "Trace protection domain verifcation") \ 1119255736Sdavidch 
\ 1120255736Sdavidch develop(bool, TraceClearedExceptions, false, \ 1121255736Sdavidch "Prints when an exception is forcibly cleared") \ 1122255736Sdavidch \ 1123255736Sdavidch product(bool, TraceClassResolution, false, \ 1124255736Sdavidch "Trace all constant pool resolutions (for debugging)") \ 1125255736Sdavidch \ 1126255736Sdavidch product(bool, TraceBiasedLocking, false, \ 1127255736Sdavidch "Trace biased locking in JVM") \ 1128255736Sdavidch \ 1129255736Sdavidch product(bool, TraceMonitorInflation, false, \ 1130255736Sdavidch "Trace monitor inflation in JVM") \ 1131255736Sdavidch \ 1132255736Sdavidch /* assembler */ \ 1133255736Sdavidch product(bool, Use486InstrsOnly, false, \ 1134255736Sdavidch "Use 80486 Compliant instruction subset") \ 1135255736Sdavidch \ 1136255736Sdavidch /* gc */ \ 1137255736Sdavidch \ 1138255736Sdavidch product(bool, UseSerialGC, false, \ 1139255736Sdavidch "Use the serial garbage collector") \ 1140255736Sdavidch \ 1141255736Sdavidch experimental(bool, UseG1GC, false, \ 1142255736Sdavidch "Use the Garbage-First garbage collector") \ 1143255736Sdavidch \ 1144255736Sdavidch product(bool, UseParallelGC, false, \ 1145255736Sdavidch "Use the Parallel Scavenge garbage collector") \ 1146255736Sdavidch \ 1147255736Sdavidch product(bool, UseParallelOldGC, false, \ 1148255736Sdavidch "Use the Parallel Old garbage collector") \ 1149255736Sdavidch \ 1150255736Sdavidch product(bool, UseParallelOldGCCompacting, true, \ 1151255736Sdavidch "In the Parallel Old garbage collector use parallel compaction") \ 1152255736Sdavidch \ 1153255736Sdavidch product(bool, UseParallelDensePrefixUpdate, true, \ 1154255736Sdavidch "In the Parallel Old garbage collector use parallel dense" \ 1155255736Sdavidch " prefix update") \ 1156255736Sdavidch \ 1157255736Sdavidch develop(bool, UseParallelOldGCChunkPointerCalc, true, \ 1158255736Sdavidch "In the Parallel Old garbage collector use chucks to calculate" \ 1159255736Sdavidch " new object locations") \ 
1160255736Sdavidch \ 1161255736Sdavidch product(uintx, HeapMaximumCompactionInterval, 20, \ 1162255736Sdavidch "How often should we maximally compact the heap (not allowing " \ 1163255736Sdavidch "any dead space)") \ 1164255736Sdavidch \ 1165255736Sdavidch product(uintx, HeapFirstMaximumCompactionCount, 3, \ 1166255736Sdavidch "The collection count for the first maximum compaction") \ 1167255736Sdavidch \ 1168255736Sdavidch product(bool, UseMaximumCompactionOnSystemGC, true, \ 1169255736Sdavidch "In the Parallel Old garbage collector maximum compaction for " \ 1170255736Sdavidch "a system GC") \ 1171255736Sdavidch \ 1172255736Sdavidch product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 1173255736Sdavidch "The mean used by the par compact dead wood" \ 1174255736Sdavidch "limiter (a number between 0-100).") \ 1175255736Sdavidch \ 1176255736Sdavidch product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 1177255736Sdavidch "The standard deviation used by the par compact dead wood" \ 1178255736Sdavidch "limiter (a number between 0-100).") \ 1179255736Sdavidch \ 1180255736Sdavidch product(bool, UseParallelOldGCDensePrefix, true, \ 1181255736Sdavidch "Use a dense prefix with the Parallel Old garbage collector") \ 1182255736Sdavidch \ 1183255736Sdavidch product(uintx, ParallelGCThreads, 0, \ 1184255736Sdavidch "Number of parallel threads parallel gc will use") \ 1185255736Sdavidch \ 1186255736Sdavidch product(uintx, ParallelCMSThreads, 0, \ 1187255736Sdavidch "Max number of threads CMS will use for concurrent work") \ 1188255736Sdavidch \ 1189255736Sdavidch develop(bool, VerifyParallelOldWithMarkSweep, false, \ 1190255736Sdavidch "Use the MarkSweep code to verify phases of Parallel Old") \ 1191255736Sdavidch \ 1192255736Sdavidch develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1, \ 1193255736Sdavidch "Interval at which the MarkSweep code is used to verify " \ 1194255736Sdavidch "phases of Parallel Old") \ 1195255736Sdavidch \ 1196255736Sdavidch develop(bool, 
ParallelOldMTUnsafeMarkBitMap, false, \ 1197255736Sdavidch "Use the Parallel Old MT unsafe in marking the bitmap") \ 1198255736Sdavidch \ 1199255736Sdavidch develop(bool, ParallelOldMTUnsafeUpdateLiveData, false, \ 1200255736Sdavidch "Use the Parallel Old MT unsafe in update of live size") \ 1201255736Sdavidch \ 1202255736Sdavidch develop(bool, TraceChunkTasksQueuing, false, \ 1203255736Sdavidch "Trace the queuing of the chunk tasks") \ 1204255736Sdavidch \ 1205255736Sdavidch product(uintx, ParallelMarkingThreads, 0, \ 1206255736Sdavidch "Number of marking threads concurrent gc will use") \ 1207255736Sdavidch \ 1208255736Sdavidch product(uintx, YoungPLABSize, 4096, \ 1209255736Sdavidch "Size of young gen promotion labs (in HeapWords)") \ 1210255736Sdavidch \ 1211255736Sdavidch product(uintx, OldPLABSize, 1024, \ 1212255736Sdavidch "Size of old gen promotion labs (in HeapWords)") \ 1213255736Sdavidch \ 1214255736Sdavidch product(uintx, GCTaskTimeStampEntries, 200, \ 1215255736Sdavidch "Number of time stamp entries per gc worker thread") \ 1216255736Sdavidch \ 1217255736Sdavidch product(bool, AlwaysTenure, false, \ 1218255736Sdavidch "Always tenure objects in eden. 
(ParallelGC only)") \ 1219255736Sdavidch \ 1220255736Sdavidch product(bool, NeverTenure, false, \ 1221255736Sdavidch "Never tenure objects in eden, May tenure on overflow" \ 1222255736Sdavidch " (ParallelGC only)") \ 1223255736Sdavidch \ 1224255736Sdavidch product(bool, ScavengeBeforeFullGC, true, \ 1225255736Sdavidch "Scavenge youngest generation before each full GC," \ 1226255736Sdavidch " used with UseParallelGC") \ 1227255736Sdavidch \ 1228255736Sdavidch develop(bool, ScavengeWithObjectsInToSpace, false, \ 1229255736Sdavidch "Allow scavenges to occur when to_space contains objects.") \ 1230255736Sdavidch \ 1231255736Sdavidch product(bool, UseConcMarkSweepGC, false, \ 1232255736Sdavidch "Use Concurrent Mark-Sweep GC in the old generation") \ 1233255736Sdavidch \ 1234255736Sdavidch product(bool, ExplicitGCInvokesConcurrent, false, \ 1235255736Sdavidch "A System.gc() request invokes a concurrent collection;" \ 1236255736Sdavidch " (effective only when UseConcMarkSweepGC)") \ 1237255736Sdavidch \ 1238255736Sdavidch product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ 1239255736Sdavidch "A System.gc() request invokes a concurrent collection and" \ 1240255736Sdavidch " also unloads classes during such a concurrent gc cycle " \ 1241255736Sdavidch " (effective only when UseConcMarkSweepGC)") \ 1242255736Sdavidch \ 1243255736Sdavidch develop(bool, UseCMSAdaptiveFreeLists, true, \ 1244255736Sdavidch "Use Adaptive Free Lists in the CMS generation") \ 1245255736Sdavidch \ 1246255736Sdavidch develop(bool, UseAsyncConcMarkSweepGC, true, \ 1247255736Sdavidch "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\ 1248255736Sdavidch \ 1249255736Sdavidch develop(bool, RotateCMSCollectionTypes, false, \ 1250255736Sdavidch "Rotate the CMS collections among concurrent and STW") \ 1251255736Sdavidch \ 1252255736Sdavidch product(bool, UseCMSBestFit, true, \ 1253255736Sdavidch "Use CMS best fit allocation strategy") \ 1254255736Sdavidch \ 
1255255736Sdavidch product(bool, UseCMSCollectionPassing, true, \ 1256255736Sdavidch "Use passing of collection from background to foreground") \ 1257255736Sdavidch \ 1258255736Sdavidch product(bool, UseParNewGC, false, \ 1259255736Sdavidch "Use parallel threads in the new generation.") \ 1260255736Sdavidch \ 1261255736Sdavidch product(bool, ParallelGCVerbose, false, \ 1262255736Sdavidch "Verbose output for parallel GC.") \ 1263255736Sdavidch \ 1264255736Sdavidch product(intx, ParallelGCBufferWastePct, 10, \ 1265255736Sdavidch "wasted fraction of parallel allocation buffer.") \ 1266255736Sdavidch \ 1267255736Sdavidch product(bool, ParallelGCRetainPLAB, true, \ 1268296580Sdavidcs "Retain parallel allocation buffers across scavenges.") \ 1269296580Sdavidcs \ 1270296580Sdavidcs product(intx, TargetPLABWastePct, 10, \ 1271296580Sdavidcs "target wasted space in last buffer as pct of overall allocation")\ 1272296580Sdavidcs \ 1273296580Sdavidcs product(uintx, PLABWeight, 75, \ 1274296580Sdavidcs "Percentage (0-100) used to weight the current sample when" \ 1275296580Sdavidcs "computing exponentially decaying average for ResizePLAB.") \ 1276296580Sdavidcs \ 1277296580Sdavidcs product(bool, ResizePLAB, true, \ 1278296580Sdavidcs "Dynamically resize (survivor space) promotion labs") \ 1279296580Sdavidcs \ 1280296580Sdavidcs product(bool, PrintPLAB, false, \ 1281296580Sdavidcs "Print (survivor space) promotion labs sizing decisions") \ 1282296580Sdavidcs \ 1283296580Sdavidcs product(intx, ParGCArrayScanChunk, 50, \ 1284296580Sdavidcs "Scan a subset and push remainder, if array is bigger than this") \ 1285296580Sdavidcs \ 1286296580Sdavidcs product(intx, ParGCDesiredObjsFromOverflowList, 20, \ 1287296580Sdavidcs "The desired number of objects to claim from the overflow list") \ 1288296580Sdavidcs \ 1289296580Sdavidcs product(uintx, CMSParPromoteBlocksToClaim, 50, \ 1290296580Sdavidcs "Number of blocks to attempt to claim when refilling CMS LAB for "\ 1291296580Sdavidcs 
"parallel GC.") \ 1292296580Sdavidcs \ 1293296580Sdavidcs product(bool, AlwaysPreTouch, false, \ 1294296580Sdavidcs "It forces all freshly committed pages to be pre-touched.") \ 1295296580Sdavidcs \ 1296296580Sdavidcs product(bool, CMSUseOldDefaults, false, \ 1297296580Sdavidcs "A flag temporarily introduced to allow reverting to some older" \ 1298296580Sdavidcs "default settings; older as of 6.0 ") \ 1299296580Sdavidcs \ 1300296580Sdavidcs product(intx, CMSYoungGenPerWorker, 16*M, \ 1301296580Sdavidcs "The amount of young gen chosen by default per GC worker " \ 1302296580Sdavidcs "thread available ") \ 1303296580Sdavidcs \ 1304296580Sdavidcs product(bool, GCOverheadReporting, false, \ 1305296580Sdavidcs "Enables the GC overhead reporting facility") \ 1306296580Sdavidcs \ 1307296580Sdavidcs product(intx, GCOverheadReportingPeriodMS, 100, \ 1308296580Sdavidcs "Reporting period for conc GC overhead reporting, in ms ") \ 1309296580Sdavidcs \ 1310296580Sdavidcs product(bool, CMSIncrementalMode, false, \ 1311296580Sdavidcs "Whether CMS GC should operate in \"incremental\" mode") \ 1312296580Sdavidcs \ 1313296580Sdavidcs product(uintx, CMSIncrementalDutyCycle, 10, \ 1314296580Sdavidcs "CMS incremental mode duty cycle (a percentage, 0-100). 
If" \ 1315296580Sdavidcs "CMSIncrementalPacing is enabled, then this is just the initial" \ 1316296580Sdavidcs "value") \ 1317296580Sdavidcs \ 1318296580Sdavidcs product(bool, CMSIncrementalPacing, true, \ 1319296580Sdavidcs "Whether the CMS incremental mode duty cycle should be " \ 1320296580Sdavidcs "automatically adjusted") \ 1321296580Sdavidcs \ 1322296580Sdavidcs product(uintx, CMSIncrementalDutyCycleMin, 0, \ 1323296580Sdavidcs "Lower bound on the duty cycle when CMSIncrementalPacing is" \ 1324255736Sdavidch "enabled (a percentage, 0-100).") \ 1325255736Sdavidch \ 1326255736Sdavidch product(uintx, CMSIncrementalSafetyFactor, 10, \ 1327255736Sdavidch "Percentage (0-100) used to add conservatism when computing the" \ 1328255736Sdavidch "duty cycle.") \ 1329255736Sdavidch \ 1330255736Sdavidch product(uintx, CMSIncrementalOffset, 0, \ 1331255736Sdavidch "Percentage (0-100) by which the CMS incremental mode duty cycle" \ 1332255736Sdavidch "is shifted to the right within the period between young GCs") \ 1333255736Sdavidch \ 1334255736Sdavidch product(uintx, CMSExpAvgFactor, 25, \ 1335255736Sdavidch "Percentage (0-100) used to weight the current sample when" \ 1336255736Sdavidch "computing exponential averages for CMS statistics.") \ 1337255736Sdavidch \ 1338255736Sdavidch product(uintx, CMS_FLSWeight, 50, \ 1339255736Sdavidch "Percentage (0-100) used to weight the current sample when" \ 1340255736Sdavidch "computing exponentially decating averages for CMS FLS statistics.") \ 1341255736Sdavidch \ 1342255736Sdavidch product(uintx, CMS_FLSPadding, 2, \ 1343255736Sdavidch "The multiple of deviation from mean to use for buffering" \ 1344255736Sdavidch "against volatility in free list demand.") \ 1345255736Sdavidch \ 1346255736Sdavidch product(uintx, FLSCoalescePolicy, 2, \ 1347255736Sdavidch "CMS: Aggression level for coalescing, increasing from 0 to 4") \ 1348255736Sdavidch \ 1349255736Sdavidch product(uintx, CMS_SweepWeight, 50, \ 1350255736Sdavidch "Percentage 
(0-100) used to weight the current sample when" \ 1351255736Sdavidch "computing exponentially decaying average for inter-sweep duration.") \ 1352255736Sdavidch \ 1353255736Sdavidch product(uintx, CMS_SweepPadding, 2, \ 1354255736Sdavidch "The multiple of deviation from mean to use for buffering" \ 1355255736Sdavidch "against volatility in inter-sweep duration.") \ 1356255736Sdavidch \ 1357255736Sdavidch product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 1358255736Sdavidch "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 1359255736Sdavidch " duration exceeds this threhold in milliseconds") \ 1360255736Sdavidch \ 1361255736Sdavidch develop(bool, CMSTraceIncrementalMode, false, \ 1362255736Sdavidch "Trace CMS incremental mode") \ 1363255736Sdavidch \ 1364255736Sdavidch develop(bool, CMSTraceIncrementalPacing, false, \ 1365255736Sdavidch "Trace CMS incremental mode pacing computation") \ 1366255736Sdavidch \ 1367255736Sdavidch develop(bool, CMSTraceThreadState, false, \ 1368255736Sdavidch "Trace the CMS thread state (enable the trace_state() method)") \ 1369255736Sdavidch \ 1370255736Sdavidch product(bool, CMSClassUnloadingEnabled, false, \ 1371255736Sdavidch "Whether class unloading enabled when using CMS GC") \ 1372255736Sdavidch \ 1373255736Sdavidch product(uintx, CMSClassUnloadingMaxInterval, 0, \ 1374255736Sdavidch "When CMS class unloading is enabled, the maximum CMS cycle count"\ 1375255736Sdavidch " for which classes may not be unloaded") \ 1376255736Sdavidch \ 1377255736Sdavidch product(bool, CMSCompactWhenClearAllSoftRefs, true, \ 1378255736Sdavidch "Compact when asked to collect CMS gen with clear_all_soft_refs") \ 1379255736Sdavidch \ 1380255736Sdavidch product(bool, UseCMSCompactAtFullCollection, true, \ 1381255736Sdavidch "Use mark sweep compact at full collections") \ 1382255736Sdavidch \ 1383255736Sdavidch product(uintx, CMSFullGCsBeforeCompaction, 0, \ 1384255736Sdavidch "Number of CMS full collection done before compaction if > 0") 
\ 1385255736Sdavidch \ 1386255736Sdavidch develop(intx, CMSDictionaryChoice, 0, \ 1387255736Sdavidch "Use BinaryTreeDictionary as default in the CMS generation") \ 1388255736Sdavidch \ 1389255736Sdavidch product(uintx, CMSIndexedFreeListReplenish, 4, \ 1390255736Sdavidch "Replenish and indexed free list with this number of chunks") \ 1391255736Sdavidch \ 1392255736Sdavidch product(bool, CMSLoopWarn, false, \ 1393255736Sdavidch "Warn in case of excessive CMS looping") \ 1394255736Sdavidch \ 1395255736Sdavidch develop(bool, CMSOverflowEarlyRestoration, false, \ 1396255736Sdavidch "Whether preserved marks should be restored early") \ 1397255736Sdavidch \ 1398255736Sdavidch product(uintx, CMSMarkStackSize, 32*K, \ 1399255736Sdavidch "Size of CMS marking stack") \ 1400255736Sdavidch \ 1401255736Sdavidch product(uintx, CMSMarkStackSizeMax, 4*M, \ 1402255736Sdavidch "Max size of CMS marking stack") \ 1403255736Sdavidch \ 1404255736Sdavidch notproduct(bool, CMSMarkStackOverflowALot, false, \ 1405255736Sdavidch "Whether we should simulate frequent marking stack / work queue" \ 1406255736Sdavidch " overflow") \ 1407255736Sdavidch \ 1408255736Sdavidch notproduct(intx, CMSMarkStackOverflowInterval, 1000, \ 1409255736Sdavidch "A per-thread `interval' counter that determines how frequently" \ 1410255736Sdavidch " we simulate overflow; a smaller number increases frequency") \ 1411255736Sdavidch \ 1412255736Sdavidch product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 1413255736Sdavidch "(Temporary, subject to experimentation)" \ 1414255736Sdavidch "Maximum number of abortable preclean iterations, if > 0") \ 1415255736Sdavidch \ 1416255736Sdavidch product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 1417255736Sdavidch "(Temporary, subject to experimentation)" \ 1418255736Sdavidch "Maximum time in abortable preclean in ms") \ 1419255736Sdavidch \ 1420255736Sdavidch product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ 1421255736Sdavidch "(Temporary, subject to experimentation)" 
\ 1422255736Sdavidch "Nominal minimum work per abortable preclean iteration") \ 1423255736Sdavidch \ 1424255736Sdavidch product(intx, CMSAbortablePrecleanWaitMillis, 100, \ 1425255736Sdavidch "(Temporary, subject to experimentation)" \ 1426255736Sdavidch " Time that we sleep between iterations when not given" \ 1427255736Sdavidch " enough work per iteration") \ 1428255736Sdavidch \ 1429255736Sdavidch product(uintx, CMSRescanMultiple, 32, \ 1430255736Sdavidch "Size (in cards) of CMS parallel rescan task") \ 1431255736Sdavidch \ 1432255736Sdavidch product(uintx, CMSConcMarkMultiple, 32, \ 1433255736Sdavidch "Size (in cards) of CMS concurrent MT marking task") \ 1434255736Sdavidch \ 1435255736Sdavidch product(uintx, CMSRevisitStackSize, 1*M, \ 1436255736Sdavidch "Size of CMS KlassKlass revisit stack") \ 1437255736Sdavidch \ 1438255736Sdavidch product(bool, CMSAbortSemantics, false, \ 1439255736Sdavidch "Whether abort-on-overflow semantics is implemented") \ 1440255736Sdavidch \ 1441255736Sdavidch product(bool, CMSParallelRemarkEnabled, true, \ 1442255736Sdavidch "Whether parallel remark enabled (only if ParNewGC)") \ 1443255736Sdavidch \ 1444255736Sdavidch product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 1445255736Sdavidch "Whether parallel remark of survivor space" \ 1446255736Sdavidch " enabled (effective only if CMSParallelRemarkEnabled)") \ 1447255736Sdavidch \ 1448255736Sdavidch product(bool, CMSPLABRecordAlways, true, \ 1449255736Sdavidch "Whether to always record survivor space PLAB bdries" \ 1450255736Sdavidch " (effective only if CMSParallelSurvivorRemarkEnabled)") \ 1451255736Sdavidch \ 1452255736Sdavidch product(bool, CMSConcurrentMTEnabled, true, \ 1453255736Sdavidch "Whether multi-threaded concurrent work enabled (if ParNewGC)") \ 1454255736Sdavidch \ 1455255736Sdavidch product(bool, CMSPermGenPrecleaningEnabled, true, \ 1456255736Sdavidch "Whether concurrent precleaning enabled in perm gen" \ 1457255736Sdavidch " (effective only when 
CMSPrecleaningEnabled is true)") \ 1458255736Sdavidch \ 1459255736Sdavidch product(bool, CMSPrecleaningEnabled, true, \ 1460255736Sdavidch "Whether concurrent precleaning enabled") \ 1461255736Sdavidch \ 1462255736Sdavidch product(uintx, CMSPrecleanIter, 3, \ 1463255736Sdavidch "Maximum number of precleaning iteration passes") \ 1464255736Sdavidch \ 1465255736Sdavidch product(uintx, CMSPrecleanNumerator, 2, \ 1466255736Sdavidch "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ 1467255736Sdavidch " ratio") \ 1468255736Sdavidch \ 1469255736Sdavidch product(uintx, CMSPrecleanDenominator, 3, \ 1470255736Sdavidch "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ 1471255736Sdavidch " ratio") \ 1472255736Sdavidch \ 1473255736Sdavidch product(bool, CMSPrecleanRefLists1, true, \ 1474255736Sdavidch "Preclean ref lists during (initial) preclean phase") \ 1475255736Sdavidch \ 1476255736Sdavidch product(bool, CMSPrecleanRefLists2, false, \ 1477255736Sdavidch "Preclean ref lists during abortable preclean phase") \ 1478255736Sdavidch \ 1479255736Sdavidch product(bool, CMSPrecleanSurvivors1, false, \ 1480255736Sdavidch "Preclean survivors during (initial) preclean phase") \ 1481296580Sdavidcs \ 1482296580Sdavidcs product(bool, CMSPrecleanSurvivors2, true, \ 1483296580Sdavidcs "Preclean survivors during abortable preclean phase") \ 1484296580Sdavidcs \ 1485296580Sdavidcs product(uintx, CMSPrecleanThreshold, 1000, \ 1486296580Sdavidcs "Don't re-iterate if #dirty cards less than this") \ 1487296580Sdavidcs \ 1488296580Sdavidcs product(bool, CMSCleanOnEnter, true, \ 1489296580Sdavidcs "Clean-on-enter optimization for reducing number of dirty cards") \ 1490296580Sdavidcs \ 1491296580Sdavidcs product(uintx, CMSRemarkVerifyVariant, 1, \ 1492296580Sdavidcs "Choose variant (1,2) of verification following remark") \ 1493296580Sdavidcs \ 1494296580Sdavidcs product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 1495296580Sdavidcs "If Eden used is below 
this value, don't try to schedule remark") \ 1496296580Sdavidcs \ 1497296580Sdavidcs product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 1498296580Sdavidcs "The Eden occupancy % at which to try and schedule remark pause") \ 1499296580Sdavidcs \ 1500296580Sdavidcs product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 1501255736Sdavidch "Start sampling Eden top at least before yg occupancy reaches" \ 1502255736Sdavidch " 1/<ratio> of the size at which we plan to schedule remark") \ 1503255736Sdavidch \ 1504255736Sdavidch product(uintx, CMSSamplingGrain, 16*K, \ 1505255736Sdavidch "The minimum distance between eden samples for CMS (see above)") \ 1506255736Sdavidch \ 1507255736Sdavidch product(bool, CMSScavengeBeforeRemark, false, \ 1508255736Sdavidch "Attempt scavenge before the CMS remark step") \ 1509255736Sdavidch \ 1510255736Sdavidch develop(bool, CMSTraceSweeper, false, \ 1511255736Sdavidch "Trace some actions of the CMS sweeper") \ 1512255736Sdavidch \ 1513255736Sdavidch product(uintx, CMSWorkQueueDrainThreshold, 10, \ 1514255736Sdavidch "Don't drain below this size per parallel worker/thief") \ 1515255736Sdavidch \ 1516255736Sdavidch product(intx, CMSWaitDuration, 2000, \ 1517255736Sdavidch "Time in milliseconds that CMS thread waits for young GC") \ 1518255736Sdavidch \ 1519255736Sdavidch product(bool, CMSYield, true, \ 1520255736Sdavidch "Yield between steps of concurrent mark & sweep") \ 1521255736Sdavidch \ 1522255736Sdavidch product(uintx, CMSBitMapYieldQuantum, 10*M, \ 1523255736Sdavidch "Bitmap operations should process at most this many bits" \ 1524255736Sdavidch "between yields") \ 1525255736Sdavidch \ 1526255736Sdavidch diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 1527255736Sdavidch "Verify that all refs across the FLS boundary " \ 1528255736Sdavidch " are to valid objects") \ 1529255736Sdavidch \ 1530255736Sdavidch diagnostic(bool, FLSVerifyLists, false, \ 1531255736Sdavidch "Do lots of (expensive) FreeListSpace verification") \ 
1532255736Sdavidch \ 1533255736Sdavidch diagnostic(bool, FLSVerifyIndexTable, false, \ 1534255736Sdavidch "Do lots of (expensive) FLS index table verification") \ 1535255736Sdavidch \ 1536255736Sdavidch develop(bool, FLSVerifyDictionary, false, \ 1537255736Sdavidch "Do lots of (expensive) FLS dictionary verification") \ 1538255736Sdavidch \ 1539255736Sdavidch develop(bool, VerifyBlockOffsetArray, false, \ 1540255736Sdavidch "Do (expensive!) block offset array verification") \ 1541255736Sdavidch \ 1542255736Sdavidch product(bool, BlockOffsetArrayUseUnallocatedBlock, trueInDebug, \ 1543255736Sdavidch "Maintain _unallocated_block in BlockOffsetArray" \ 1544255736Sdavidch " (currently applicable only to CMS collector)") \ 1545255736Sdavidch \ 1546255736Sdavidch develop(bool, TraceCMSState, false, \ 1547255736Sdavidch "Trace the state of the CMS collection") \ 1548255736Sdavidch \ 1549255736Sdavidch product(intx, RefDiscoveryPolicy, 0, \ 1550255736Sdavidch "Whether reference-based(0) or referent-based(1)") \ 1551255736Sdavidch \ 1552255736Sdavidch product(bool, ParallelRefProcEnabled, false, \ 1553255736Sdavidch "Enable parallel reference processing whenever possible") \ 1554255736Sdavidch \ 1555255736Sdavidch product(bool, ParallelRefProcBalancingEnabled, true, \ 1556255736Sdavidch "Enable balancing of reference processing queues") \ 1557255736Sdavidch \ 1558255736Sdavidch product(intx, CMSTriggerRatio, 80, \ 1559255736Sdavidch "Percentage of MinHeapFreeRatio in CMS generation that is " \ 1560255736Sdavidch " allocated before a CMS collection cycle commences") \ 1561255736Sdavidch \ 1562255736Sdavidch product(intx, CMSTriggerPermRatio, 80, \ 1563255736Sdavidch "Percentage of MinHeapFreeRatio in the CMS perm generation that" \ 1564255736Sdavidch " is allocated before a CMS collection cycle commences, that " \ 1565255736Sdavidch " also collects the perm generation") \ 1566255736Sdavidch \ 1567255736Sdavidch product(uintx, CMSBootstrapOccupancy, 50, \ 1568255736Sdavidch 
"Percentage CMS generation occupancy at which to " \ 1569255736Sdavidch " initiate CMS collection for bootstrapping collection stats") \ 1570255736Sdavidch \ 1571255736Sdavidch product(intx, CMSInitiatingOccupancyFraction, -1, \ 1572255736Sdavidch "Percentage CMS generation occupancy to start a CMS collection " \ 1573255736Sdavidch " cycle (A negative value means that CMSTriggerRatio is used)") \ 1574255736Sdavidch \ 1575255736Sdavidch product(intx, CMSInitiatingPermOccupancyFraction, -1, \ 1576255736Sdavidch "Percentage CMS perm generation occupancy to start a CMScollection"\ 1577255736Sdavidch " cycle (A negative value means that CMSTriggerPermRatio is used)")\ 1578255736Sdavidch \ 1579255736Sdavidch product(bool, UseCMSInitiatingOccupancyOnly, false, \ 1580255736Sdavidch "Only use occupancy as a crierion for starting a CMS collection") \ 1581255736Sdavidch \ 1582255736Sdavidch product(intx, CMSIsTooFullPercentage, 98, \ 1583255736Sdavidch "An absolute ceiling above which CMS will always consider the" \ 1584255736Sdavidch " perm gen ripe for collection") \ 1585255736Sdavidch \ 1586255736Sdavidch develop(bool, CMSTestInFreeList, false, \ 1587255736Sdavidch "Check if the coalesced range is already in the " \ 1588255736Sdavidch "free lists as claimed.") \ 1589255736Sdavidch \ 1590255736Sdavidch notproduct(bool, CMSVerifyReturnedBytes, false, \ 1591255736Sdavidch "Check that all the garbage collected was returned to the " \ 1592255736Sdavidch "free lists.") \ 1593255736Sdavidch \ 1594255736Sdavidch notproduct(bool, ScavengeALot, false, \ 1595255736Sdavidch "Force scavenge at every Nth exit from the runtime system " \ 1596255736Sdavidch "(N=ScavengeALotInterval)") \ 1597255736Sdavidch \ 1598255736Sdavidch develop(bool, FullGCALot, false, \ 1599255736Sdavidch "Force full gc at every Nth exit from the runtime system " \ 1600255736Sdavidch "(N=FullGCALotInterval)") \ 1601255736Sdavidch \ 1602255736Sdavidch notproduct(bool, GCALotAtAllSafepoints, false, \ 
1603255736Sdavidch "Enforce ScavengeALot/GCALot at all potential safepoints") \ 1604255736Sdavidch \ 1605255736Sdavidch product(bool, HandlePromotionFailure, true, \ 1606255736Sdavidch "The youngest generation collection does not require" \ 1607255736Sdavidch " a guarantee of full promotion of all live objects.") \ 1608255736Sdavidch \ 1609255736Sdavidch notproduct(bool, PromotionFailureALot, false, \ 1610255736Sdavidch "Use promotion failure handling on every youngest generation " \ 1611255736Sdavidch "collection") \ 1612255736Sdavidch \ 1613255736Sdavidch develop(uintx, PromotionFailureALotCount, 1000, \ 1614255736Sdavidch "Number of promotion failures occurring at ParGCAllocBuffer" \ 1615255736Sdavidch "refill attempts (ParNew) or promotion attempts " \ 1616255736Sdavidch "(other young collectors) ") \ 1617255736Sdavidch \ 1618255736Sdavidch develop(uintx, PromotionFailureALotInterval, 5, \ 1619255736Sdavidch "Total collections between promotion failures alot") \ 1620255736Sdavidch \ 1621255736Sdavidch develop(intx, WorkStealingSleepMillis, 1, \ 1622255736Sdavidch "Sleep time when sleep is used for yields") \ 1623255736Sdavidch \ 1624255736Sdavidch develop(uintx, WorkStealingYieldsBeforeSleep, 1000, \ 1625255736Sdavidch "Number of yields before a sleep is done during workstealing") \ 1626255736Sdavidch \ 1627255736Sdavidch product(uintx, PreserveMarkStackSize, 40, \ 1628255736Sdavidch "Size for stack used in promotion failure handling") \ 1629255736Sdavidch \ 1630255736Sdavidch product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 1631255736Sdavidch \ 1632255736Sdavidch product_pd(bool, ResizeTLAB, \ 1633255736Sdavidch "Dynamically resize tlab size for threads") \ 1634255736Sdavidch \ 1635255736Sdavidch product(bool, ZeroTLAB, false, \ 1636255736Sdavidch "Zero out the newly created TLAB") \ 1637255736Sdavidch \ 1638255736Sdavidch product(bool, FastTLABRefill, true, \ 1639255736Sdavidch "Use fast TLAB refill code") \ 1640255736Sdavidch \ 
1641255736Sdavidch product(bool, PrintTLAB, false, \ 1642255736Sdavidch "Print various TLAB related information") \ 1643255736Sdavidch \ 1644255736Sdavidch product(bool, TLABStats, true, \ 1645255736Sdavidch "Print various TLAB related information") \ 1646255736Sdavidch \ 1647255736Sdavidch product_pd(bool, NeverActAsServerClassMachine, \ 1648255736Sdavidch "Never act like a server-class machine") \ 1649255736Sdavidch \ 1650255736Sdavidch product(bool, AlwaysActAsServerClassMachine, false, \ 1651255736Sdavidch "Always act like a server-class machine") \ 1652255736Sdavidch \ 1653255736Sdavidch product_pd(uintx, DefaultMaxRAM, \ 1654255736Sdavidch "Maximum real memory size for setting server class heap size") \ 1655255736Sdavidch \ 1656255736Sdavidch product(uintx, DefaultMaxRAMFraction, 4, \ 1657255736Sdavidch "Fraction (1/n) of real memory used for server class max heap") \ 1658255736Sdavidch \ 1659255736Sdavidch product(uintx, DefaultInitialRAMFraction, 64, \ 1660255736Sdavidch "Fraction (1/n) of real memory used for server class initial heap") \ 1661255736Sdavidch \ 1662255736Sdavidch product(bool, UseAutoGCSelectPolicy, false, \ 1663255736Sdavidch "Use automatic collection selection policy") \ 1664255736Sdavidch \ 1665255736Sdavidch product(uintx, AutoGCSelectPauseMillis, 5000, \ 1666255736Sdavidch "Automatic GC selection pause threshhold in ms") \ 1667255736Sdavidch \ 1668255736Sdavidch product(bool, UseAdaptiveSizePolicy, true, \ 1669255736Sdavidch "Use adaptive generation sizing policies") \ 1670255736Sdavidch \ 1671255736Sdavidch product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \ 1672255736Sdavidch "Use adaptive survivor sizing policies") \ 1673255736Sdavidch \ 1674255736Sdavidch product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \ 1675255736Sdavidch "Use adaptive young-old sizing policies at minor collections") \ 1676255736Sdavidch \ 1677255736Sdavidch product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \ 
1678255736Sdavidch "Use adaptive young-old sizing policies at major collections") \ 1679255736Sdavidch \ 1680255736Sdavidch product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 1681255736Sdavidch "Use statistics from System.GC for adaptive size policy") \ 1682255736Sdavidch \ 1683255736Sdavidch product(bool, UseAdaptiveGCBoundary, false, \ 1684255736Sdavidch "Allow young-old boundary to move") \ 1685255736Sdavidch \ 1686255736Sdavidch develop(bool, TraceAdaptiveGCBoundary, false, \ 1687255736Sdavidch "Trace young-old boundary moves") \ 1688255736Sdavidch \ 1689255736Sdavidch develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \ 1690255736Sdavidch "Resize the virtual spaces of the young or old generations") \ 1691255736Sdavidch \ 1692255736Sdavidch product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 1693255736Sdavidch "Policy for changeing generation size for throughput goals") \ 1694255736Sdavidch \ 1695255736Sdavidch product(uintx, AdaptiveSizePausePolicy, 0, \ 1696255736Sdavidch "Policy for changing generation size for pause goals") \ 1697255736Sdavidch \ 1698255736Sdavidch develop(bool, PSAdjustTenuredGenForMinorPause, false, \ 1699255736Sdavidch "Adjust tenured generation to achive a minor pause goal") \ 1700255736Sdavidch \ 1701255736Sdavidch develop(bool, PSAdjustYoungGenForMajorPause, false, \ 1702255736Sdavidch "Adjust young generation to achive a major pause goal") \ 1703255736Sdavidch \ 1704255736Sdavidch product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 1705255736Sdavidch "Number of steps where heuristics is used before data is used") \ 1706255736Sdavidch \ 1707255736Sdavidch develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \ 1708255736Sdavidch "Number of collections before the adaptive sizing is started") \ 1709255736Sdavidch \ 1710255736Sdavidch product(uintx, AdaptiveSizePolicyOutputInterval, 0, \ 1711255736Sdavidch "Collecton interval for printing information, zero => never") \ 1712255736Sdavidch \ 1713255736Sdavidch 
product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \ 1714255736Sdavidch "Use adaptive minimum footprint as a goal") \ 1715255736Sdavidch \ 1716255736Sdavidch product(uintx, AdaptiveSizePolicyWeight, 10, \ 1717255736Sdavidch "Weight given to exponential resizing, between 0 and 100") \ 1718255736Sdavidch \ 1719255736Sdavidch product(uintx, AdaptiveTimeWeight, 25, \ 1720255736Sdavidch "Weight given to time in adaptive policy, between 0 and 100") \ 1721255736Sdavidch \ 1722255736Sdavidch product(uintx, PausePadding, 1, \ 1723255736Sdavidch "How much buffer to keep for pause time") \ 1724255736Sdavidch \ 1725255736Sdavidch product(uintx, PromotedPadding, 3, \ 1726255736Sdavidch "How much buffer to keep for promotion failure") \ 1727255736Sdavidch \ 1728255736Sdavidch product(uintx, SurvivorPadding, 3, \ 1729255736Sdavidch "How much buffer to keep for survivor overflow") \ 1730255736Sdavidch \ 1731255736Sdavidch product(uintx, AdaptivePermSizeWeight, 20, \ 1732255736Sdavidch "Weight for perm gen exponential resizing, between 0 and 100") \ 1733255736Sdavidch \ 1734255736Sdavidch product(uintx, PermGenPadding, 3, \ 1735255736Sdavidch "How much buffer to keep for perm gen sizing") \ 1736255736Sdavidch \ 1737255736Sdavidch product(uintx, ThresholdTolerance, 10, \ 1738255736Sdavidch "Allowed collection cost difference between generations") \ 1739255736Sdavidch \ 1740255736Sdavidch product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \ 1741255736Sdavidch "If collection costs are within margin, reduce both by full delta") \ 1742255736Sdavidch \ 1743255736Sdavidch product(uintx, YoungGenerationSizeIncrement, 20, \ 1744255736Sdavidch "Adaptive size percentage change in young generation") \ 1745255736Sdavidch \ 1746255736Sdavidch product(uintx, YoungGenerationSizeSupplement, 80, \ 1747255736Sdavidch "Supplement to YoungedGenerationSizeIncrement used at startup") \ 1748255736Sdavidch \ 1749255736Sdavidch product(uintx, YoungGenerationSizeSupplementDecay, 8, \ 
1750255736Sdavidch "Decay factor to YoungedGenerationSizeSupplement") \ 1751255736Sdavidch \ 1752255736Sdavidch product(uintx, TenuredGenerationSizeIncrement, 20, \ 1753255736Sdavidch "Adaptive size percentage change in tenured generation") \ 1754255736Sdavidch \ 1755255736Sdavidch product(uintx, TenuredGenerationSizeSupplement, 80, \ 1756255736Sdavidch "Supplement to TenuredGenerationSizeIncrement used at startup") \ 1757255736Sdavidch \ 1758255736Sdavidch product(uintx, TenuredGenerationSizeSupplementDecay, 2, \ 1759255736Sdavidch "Decay factor to TenuredGenerationSizeIncrement") \ 1760255736Sdavidch \ 1761255736Sdavidch product(uintx, MaxGCPauseMillis, max_uintx, \ 1762255736Sdavidch "Adaptive size policy maximum GC pause time goal in msec") \ 1763255736Sdavidch \ 1764255736Sdavidch product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 1765255736Sdavidch "Adaptive size policy maximum GC minor pause time goal in msec") \ 1766255736Sdavidch \ 1767255736Sdavidch product(uintx, GCTimeRatio, 99, \ 1768255736Sdavidch "Adaptive size policy application time to GC time ratio") \ 1769255736Sdavidch \ 1770255736Sdavidch product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \ 1771255736Sdavidch "Adaptive size scale down factor for shrinking") \ 1772255736Sdavidch \ 1773255736Sdavidch product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \ 1774255736Sdavidch "Adaptive size decays the major cost for long major intervals") \ 1775255736Sdavidch \ 1776255736Sdavidch product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \ 1777258203Sedavis "Time scale over which major costs decay") \ 1778255736Sdavidch \ 1779255736Sdavidch product(uintx, MinSurvivorRatio, 3, \ 1780255736Sdavidch "Minimum ratio of young generation/survivor space size") \ 1781255736Sdavidch \ 1782255736Sdavidch product(uintx, InitialSurvivorRatio, 8, \ 1783255736Sdavidch "Initial ratio of eden/survivor space size") \ 1784255736Sdavidch \ 1785255736Sdavidch product(uintx, BaseFootPrintEstimate, 256*M, \ 1786255736Sdavidch 
"Estimate of footprint other than Java Heap") \ 1787255736Sdavidch \ 1788255736Sdavidch product(bool, UseGCOverheadLimit, true, \ 1789255736Sdavidch "Use policy to limit of proportion of time spent in GC " \ 1790255736Sdavidch "before an OutOfMemory error is thrown") \ 1791255736Sdavidch \ 1792255736Sdavidch product(uintx, GCTimeLimit, 98, \ 1793255736Sdavidch "Limit of proportion of time spent in GC before an OutOfMemory" \ 1794255736Sdavidch "error is thrown (used with GCHeapFreeLimit)") \ 1795255736Sdavidch \ 1796255736Sdavidch product(uintx, GCHeapFreeLimit, 2, \ 1797255736Sdavidch "Minimum percentage of free space after a full GC before an " \ 1798255736Sdavidch "OutOfMemoryError is thrown (used with GCTimeLimit)") \ 1799255736Sdavidch \ 1800255736Sdavidch develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \ 1801258203Sedavis "Number of consecutive collections before gc time limit fires") \ 1802258203Sedavis \ 1803258203Sedavis product(bool, PrintAdaptiveSizePolicy, false, \ 1804258203Sedavis "Print information about AdaptiveSizePolicy") \ 1805258203Sedavis \ 1806255736Sdavidch product(intx, PrefetchCopyIntervalInBytes, -1, \ 1807258203Sedavis "How far ahead to prefetch destination area (<= 0 means off)") \ 1808255736Sdavidch \ 1809255736Sdavidch product(intx, PrefetchScanIntervalInBytes, -1, \ 1810255736Sdavidch "How far ahead to prefetch scan area (<= 0 means off)") \ 1811255736Sdavidch \ 1812258203Sedavis product(intx, PrefetchFieldsAhead, -1, \ 1813258203Sedavis "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 1814255736Sdavidch \ 1815255736Sdavidch develop(bool, UsePrefetchQueue, true, \ 1816255736Sdavidch "Use the prefetch queue during PS promotion") \ 1817255736Sdavidch \ 1818255736Sdavidch diagnostic(bool, VerifyBeforeExit, trueInDebug, \ 1819255736Sdavidch "Verify system before exiting") \ 1820255736Sdavidch \ 1821255736Sdavidch diagnostic(bool, VerifyBeforeGC, false, \ 1822255736Sdavidch "Verify memory system before GC") \ 
1823255736Sdavidch \ 1824255736Sdavidch diagnostic(bool, VerifyAfterGC, false, \ 1825255736Sdavidch "Verify memory system after GC") \ 1826255736Sdavidch \ 1827255736Sdavidch diagnostic(bool, VerifyDuringGC, false, \ 1828255736Sdavidch "Verify memory system during GC (between phases)") \ 1829255736Sdavidch \ 1830255736Sdavidch diagnostic(bool, VerifyRememberedSets, false, \ 1831255736Sdavidch "Verify GC remembered sets") \ 1832255736Sdavidch \ 1833255736Sdavidch diagnostic(bool, VerifyObjectStartArray, true, \ 1834255736Sdavidch "Verify GC object start array if verify before/after") \ 1835255736Sdavidch \ 1836255736Sdavidch product(bool, DisableExplicitGC, false, \ 1837255736Sdavidch "Tells whether calling System.gc() does a full GC") \ 1838255736Sdavidch \ 1839255736Sdavidch notproduct(bool, CheckMemoryInitialization, false, \ 1840255736Sdavidch "Checks memory initialization") \ 1841255736Sdavidch \ 1842255736Sdavidch product(bool, CollectGen0First, false, \ 1843255736Sdavidch "Collect youngest generation before each full GC") \ 1844255736Sdavidch \ 1845255736Sdavidch diagnostic(bool, BindCMSThreadToCPU, false, \ 1846255736Sdavidch "Bind CMS Thread to CPU if possible") \ 1847255736Sdavidch \ 1848255736Sdavidch diagnostic(uintx, CPUForCMSThread, 0, \ 1849255736Sdavidch "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \ 1850255736Sdavidch \ 1851255736Sdavidch product(bool, BindGCTaskThreadsToCPUs, false, \ 1852255736Sdavidch "Bind GCTaskThreads to CPUs if possible") \ 1853255736Sdavidch \ 1854255736Sdavidch product(bool, UseGCTaskAffinity, false, \ 1855255736Sdavidch "Use worker affinity when asking for GCTasks") \ 1856255736Sdavidch \ 1857255736Sdavidch product(uintx, ProcessDistributionStride, 4, \ 1858255736Sdavidch "Stride through processors when distributing processes") \ 1859255736Sdavidch \ 1860255736Sdavidch product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 1861255736Sdavidch "number of times the coordinator GC thread will sleep while " \ 
1862255736Sdavidch "yielding before giving up and resuming GC") \ 1863255736Sdavidch \ 1864255736Sdavidch product(uintx, CMSYieldSleepCount, 0, \ 1865255736Sdavidch "number of times a GC thread (minus the coordinator) " \ 1866255736Sdavidch "will sleep while yielding before giving up and resuming GC") \ 1867255736Sdavidch \ 1868255736Sdavidch notproduct(bool, PrintFlagsFinal, false, \ 1869255736Sdavidch "Print all command line flags after argument processing") \ 1870255736Sdavidch \ 1871255736Sdavidch /* gc tracing */ \ 1872255736Sdavidch manageable(bool, PrintGC, false, \ 1873255736Sdavidch "Print message at garbage collect") \ 1874255736Sdavidch \ 1875255736Sdavidch manageable(bool, PrintGCDetails, false, \ 1876255736Sdavidch "Print more details at garbage collect") \ 1877255736Sdavidch \ 1878255736Sdavidch manageable(bool, PrintGCDateStamps, false, \ 1879255736Sdavidch "Print date stamps at garbage collect") \ 1880255736Sdavidch \ 1881255736Sdavidch manageable(bool, PrintGCTimeStamps, false, \ 1882255736Sdavidch "Print timestamps at garbage collect") \ 1883255736Sdavidch \ 1884255736Sdavidch product(bool, PrintGCTaskTimeStamps, false, \ 1885255736Sdavidch "Print timestamps for individual gc worker thread tasks") \ 1886255736Sdavidch \ 1887255736Sdavidch develop(intx, ConcGCYieldTimeout, 0, \ 1888255736Sdavidch "If non-zero, assert that GC threads yield within this # of ms.") \ 1889255736Sdavidch \ 1890255736Sdavidch notproduct(bool, TraceMarkSweep, false, \ 1891255736Sdavidch "Trace mark sweep") \ 1892255736Sdavidch \ 1893255736Sdavidch product(bool, PrintReferenceGC, false, \ 1894255736Sdavidch "Print times spent handling reference objects during GC " \ 1895255736Sdavidch " (enabled only when PrintGCDetails)") \ 1896255736Sdavidch \ 1897255736Sdavidch develop(bool, TraceReferenceGC, false, \ 1898255736Sdavidch "Trace handling of soft/weak/final/phantom references") \ 1899255736Sdavidch \ 1900255736Sdavidch develop(bool, TraceFinalizerRegistration, false, \ 
1901255736Sdavidch "Trace registration of final references") \ 1902255736Sdavidch \ 1903255736Sdavidch notproduct(bool, TraceScavenge, false, \ 1904255736Sdavidch "Trace scavenge") \ 1905255736Sdavidch \ 1906255736Sdavidch product_rw(bool, TraceClassLoading, false, \ 1907255736Sdavidch "Trace all classes loaded") \ 1908255736Sdavidch \ 1909255736Sdavidch product(bool, TraceClassLoadingPreorder, false, \ 1910255736Sdavidch "Trace all classes loaded in order referenced (not loaded)") \ 1911255736Sdavidch \ 1912255736Sdavidch product_rw(bool, TraceClassUnloading, false, \ 1913255736Sdavidch "Trace unloading of classes") \ 1914255736Sdavidch \ 1915255736Sdavidch product_rw(bool, TraceLoaderConstraints, false, \ 1916255736Sdavidch "Trace loader constraints") \ 1917255736Sdavidch \ 1918255736Sdavidch product(bool, TraceGen0Time, false, \ 1919255736Sdavidch "Trace accumulated time for Gen 0 collection") \ 1920255736Sdavidch \ 1921255736Sdavidch product(bool, TraceGen1Time, false, \ 1922255736Sdavidch "Trace accumulated time for Gen 1 collection") \ 1923255736Sdavidch \ 1924255736Sdavidch product(bool, PrintTenuringDistribution, false, \ 1925255736Sdavidch "Print tenuring age information") \ 1926255736Sdavidch \ 1927255736Sdavidch product_rw(bool, PrintHeapAtGC, false, \ 1928255736Sdavidch "Print heap layout before and after each GC") \ 1929255736Sdavidch \ 1930255736Sdavidch product(bool, PrintHeapAtSIGBREAK, true, \ 1931255736Sdavidch "Print heap layout in response to SIGBREAK") \ 1932255736Sdavidch \ 1933255736Sdavidch manageable(bool, PrintClassHistogram, false, \ 1934255736Sdavidch "Print a histogram of class instances") \ 1935255736Sdavidch \ 1936255736Sdavidch develop(bool, TraceWorkGang, false, \ 1937255736Sdavidch "Trace activities of work gangs") \ 1938255736Sdavidch \ 1939255736Sdavidch product(bool, TraceParallelOldGCTasks, false, \ 1940255736Sdavidch "Trace multithreaded GC activity") \ 1941255736Sdavidch \ 1942255736Sdavidch develop(bool, 
TraceBlockOffsetTable, false, \ 1943255736Sdavidch "Print BlockOffsetTable maps") \ 1944255736Sdavidch \ 1945255736Sdavidch develop(bool, TraceCardTableModRefBS, false, \ 1946255736Sdavidch "Print CardTableModRefBS maps") \ 1947255736Sdavidch \ 1948255736Sdavidch develop(bool, TraceGCTaskManager, false, \ 1949255736Sdavidch "Trace actions of the GC task manager") \ 1950255736Sdavidch \ 1951255736Sdavidch develop(bool, TraceGCTaskQueue, false, \ 1952255736Sdavidch "Trace actions of the GC task queues") \ 1953255736Sdavidch \ 1954255736Sdavidch develop(bool, TraceGCTaskThread, false, \ 1955255736Sdavidch "Trace actions of the GC task threads") \ 1956255736Sdavidch \ 1957255736Sdavidch product(bool, PrintParallelOldGCPhaseTimes, false, \ 1958255736Sdavidch "Print the time taken by each parallel old gc phase." \ 1959255736Sdavidch "PrintGCDetails must also be enabled.") \ 1960255736Sdavidch \ 1961255736Sdavidch develop(bool, TraceParallelOldGCMarkingPhase, false, \ 1962255736Sdavidch "Trace parallel old gc marking phase") \ 1963255736Sdavidch \ 1964255736Sdavidch develop(bool, TraceParallelOldGCSummaryPhase, false, \ 1965255736Sdavidch "Trace parallel old gc summary phase") \ 1966255736Sdavidch \ 1967255736Sdavidch develop(bool, TraceParallelOldGCCompactionPhase, false, \ 1968255736Sdavidch "Trace parallel old gc compaction phase") \ 1969255736Sdavidch \ 1970255736Sdavidch develop(bool, TraceParallelOldGCDensePrefix, false, \ 1971255736Sdavidch "Trace parallel old gc dense prefix computation") \ 1972258203Sedavis \ 1973255736Sdavidch develop(bool, IgnoreLibthreadGPFault, false, \ 1974255736Sdavidch "Suppress workaround for libthread GP fault") \ 1975255736Sdavidch \ 1976255736Sdavidch product(bool, PrintJNIGCStalls, false, \ 1977255736Sdavidch "Print diagnostic message when GC is stalled" \ 1978255736Sdavidch "by JNI critical section") \ 1979255736Sdavidch \ 1980255736Sdavidch /* JVMTI heap profiling */ \ 1981255736Sdavidch \ 1982255736Sdavidch diagnostic(bool, 
TraceJVMTIObjectTagging, false, \ 1983255736Sdavidch "Trace JVMTI object tagging calls") \ 1984255736Sdavidch \ 1985255736Sdavidch diagnostic(bool, VerifyBeforeIteration, false, \ 1986255736Sdavidch "Verify memory system before JVMTI iteration") \ 1987255736Sdavidch \ 1988255736Sdavidch /* compiler interface */ \ 1989255736Sdavidch \ 1990255736Sdavidch develop(bool, CIPrintCompilerName, false, \ 1991255736Sdavidch "when CIPrint is active, print the name of the active compiler") \ 1992255736Sdavidch \ 1993255736Sdavidch develop(bool, CIPrintCompileQueue, false, \ 1994255736Sdavidch "display the contents of the compile queue whenever a " \ 1995255736Sdavidch "compilation is enqueued") \ 1996255736Sdavidch \ 1997255736Sdavidch develop(bool, CIPrintRequests, false, \ 1998255736Sdavidch "display every request for compilation") \ 1999255736Sdavidch \ 2000255736Sdavidch product(bool, CITime, false, \ 2001255736Sdavidch "collect timing information for compilation") \ 2002255736Sdavidch \ 2003255736Sdavidch develop(bool, CITimeEach, false, \ 2004255736Sdavidch "display timing information after each successful compilation") \ 2005255736Sdavidch \ 2006255736Sdavidch develop(bool, CICountOSR, true, \ 2007255736Sdavidch "use a separate counter when assigning ids to osr compilations") \ 2008255736Sdavidch \ 2009255736Sdavidch develop(bool, CICompileNatives, true, \ 2010255736Sdavidch "compile native methods if supported by the compiler") \ 2011255736Sdavidch \ 2012255736Sdavidch develop_pd(bool, CICompileOSR, \ 2013255736Sdavidch "compile on stack replacement methods if supported by the " \ 2014255736Sdavidch "compiler") \ 2015296580Sdavidcs \ 2016296580Sdavidcs develop(bool, CIPrintMethodCodes, false, \ 2017255736Sdavidch "print method bytecodes of the compiled code") \ 2018296580Sdavidcs \ 2019296580Sdavidcs develop(bool, CIPrintTypeFlow, false, \ 2020255736Sdavidch "print the results of ciTypeFlow analysis") \ 2021255736Sdavidch \ 2022255736Sdavidch develop(bool, 
CITraceTypeFlow, false, \ 2023255736Sdavidch "detailed per-bytecode tracing of ciTypeFlow analysis") \ 2024255736Sdavidch \ 2025255736Sdavidch develop(intx, CICloneLoopTestLimit, 100, \ 2026255736Sdavidch "size limit for blocks heuristically cloned in ciTypeFlow") \ 2027255736Sdavidch \ 2028255736Sdavidch /* temp diagnostics */ \ 2029255736Sdavidch \ 2030255736Sdavidch diagnostic(bool, TraceRedundantCompiles, false, \ 2031255736Sdavidch "Have compile broker print when a request already in the queue is"\ 2032255736Sdavidch " requested again") \ 2033255736Sdavidch \ 2034255736Sdavidch diagnostic(bool, InitialCompileFast, false, \ 2035255736Sdavidch "Initial compile at CompLevel_fast_compile") \ 2036255736Sdavidch \ 2037255736Sdavidch diagnostic(bool, InitialCompileReallyFast, false, \ 2038255736Sdavidch "Initial compile at CompLevel_really_fast_compile (no profile)") \ 2039255736Sdavidch \ 2040255736Sdavidch diagnostic(bool, FullProfileOnReInterpret, true, \ 2041255736Sdavidch "On re-interpret unc-trap compile next at CompLevel_fast_compile")\ 2042255736Sdavidch \ 2043255736Sdavidch /* compiler */ \ 2044255736Sdavidch \ 2045255736Sdavidch product(intx, CICompilerCount, CI_COMPILER_COUNT, \ 2046255736Sdavidch "Number of compiler threads to run") \ 2047255736Sdavidch \ 2048255736Sdavidch product(intx, CompilationPolicyChoice, 0, \ 2049255736Sdavidch "which compilation policy (0/1)") \ 2050255736Sdavidch \ 2051255736Sdavidch develop(bool, UseStackBanging, true, \ 2052255736Sdavidch "use stack banging for stack overflow checks (required for " \ 2053255736Sdavidch "proper StackOverflow handling; disable only to measure cost " \ 2054255736Sdavidch "of stackbanging)") \ 2055255736Sdavidch \ 2056255736Sdavidch develop(bool, Use24BitFPMode, true, \ 2057255736Sdavidch "Set 24-bit FPU mode on a per-compile basis ") \ 2058255736Sdavidch \ 2059255736Sdavidch develop(bool, Use24BitFP, true, \ 2060255736Sdavidch "use FP instructions that produce 24-bit precise results") \ 
2061255736Sdavidch \ 2062255736Sdavidch develop(bool, UseStrictFP, true, \ 2063255736Sdavidch "use strict fp if modifier strictfp is set") \ 2064255736Sdavidch \ 2065255736Sdavidch develop(bool, GenerateSynchronizationCode, true, \ 2066255736Sdavidch "generate locking/unlocking code for synchronized methods and " \ 2067255736Sdavidch "monitors") \ 2068255736Sdavidch \ 2069255736Sdavidch develop(bool, GenerateCompilerNullChecks, true, \ 2070255736Sdavidch "Generate explicit null checks for loads/stores/calls") \ 2071255736Sdavidch \ 2072255736Sdavidch develop(bool, GenerateRangeChecks, true, \ 2073255736Sdavidch "Generate range checks for array accesses") \ 2074255736Sdavidch \ 2075255736Sdavidch develop_pd(bool, ImplicitNullChecks, \ 2076255736Sdavidch "generate code for implicit null checks") \ 2077255736Sdavidch \ 2078255736Sdavidch product(bool, PrintSafepointStatistics, false, \ 2079255736Sdavidch "print statistics about safepoint synchronization") \ 2080255736Sdavidch \ 2081255736Sdavidch product(intx, PrintSafepointStatisticsCount, 300, \ 2082255736Sdavidch "total number of safepoint statistics collected " \ 2083255736Sdavidch "before printing them out") \ 2084255736Sdavidch \ 2085255736Sdavidch product(intx, PrintSafepointStatisticsTimeout, -1, \ 2086255736Sdavidch "print safepoint statistics only when safepoint takes" \ 2087255736Sdavidch " more than PrintSafepointSatisticsTimeout in millis") \ 2088255736Sdavidch \ 2089255736Sdavidch develop(bool, InlineAccessors, true, \ 2090255736Sdavidch "inline accessor methods (get/set)") \ 2091255736Sdavidch \ 2092255736Sdavidch product(bool, Inline, true, \ 2093255736Sdavidch "enable inlining") \ 2094255736Sdavidch \ 2095255736Sdavidch product(bool, ClipInlining, true, \ 2096255736Sdavidch "clip inlining if aggregate method exceeds DesiredMethodLimit") \ 2097255736Sdavidch \ 2098255736Sdavidch develop(bool, UseCHA, true, \ 2099255736Sdavidch "enable CHA") \ 2100255736Sdavidch \ 2101255736Sdavidch product(bool, 
UseTypeProfile, true, \ 2102255736Sdavidch "Check interpreter profile for historically monomorphic calls") \ 2103255736Sdavidch \ 2104255736Sdavidch product(intx, TypeProfileMajorReceiverPercent, 90, \ 2105255736Sdavidch "% of major receiver type to all profiled receivers") \ 2106255736Sdavidch \ 2107255736Sdavidch notproduct(bool, TimeCompiler, false, \ 2108255736Sdavidch "time the compiler") \ 2109255736Sdavidch \ 2110255736Sdavidch notproduct(bool, TimeCompiler2, false, \ 2111255736Sdavidch "detailed time the compiler (requires +TimeCompiler)") \ 2112255736Sdavidch \ 2113255736Sdavidch diagnostic(bool, PrintInlining, false, \ 2114255736Sdavidch "prints inlining optimizations") \ 2115255736Sdavidch \ 2116255736Sdavidch diagnostic(bool, PrintIntrinsics, false, \ 2117255736Sdavidch "prints attempted and successful inlining of intrinsics") \ 2118255736Sdavidch \ 2119255736Sdavidch diagnostic(ccstrlist, DisableIntrinsic, "", \ 2120255736Sdavidch "do not expand intrinsics whose (internal) names appear here") \ 2121255736Sdavidch \ 2122255736Sdavidch develop(bool, StressReflectiveCode, false, \ 2123255736Sdavidch "Use inexact types at allocations, etc., to test reflection") \ 2124255736Sdavidch \ 2125255736Sdavidch develop(bool, EagerInitialization, false, \ 2126255736Sdavidch "Eagerly initialize classes if possible") \ 2127255736Sdavidch \ 2128255736Sdavidch product(bool, Tier1UpdateMethodData, trueInTiered, \ 2129255736Sdavidch "Update methodDataOops in Tier1-generated code") \ 2130255736Sdavidch \ 2131255736Sdavidch develop(bool, TraceMethodReplacement, false, \ 2132255736Sdavidch "Print when methods are replaced do to recompilation") \ 2133255736Sdavidch \ 2134255736Sdavidch develop(bool, PrintMethodFlushing, false, \ 2135255736Sdavidch "print the nmethods being flushed") \ 2136255736Sdavidch \ 2137255736Sdavidch notproduct(bool, LogMultipleMutexLocking, false, \ 2138255736Sdavidch "log locking and unlocking of mutexes (only if multiple locks " \ 2139255736Sdavidch 
"are held)") \ 2140255736Sdavidch \ 2141255736Sdavidch develop(bool, UseRelocIndex, false, \ 2142255736Sdavidch "use an index to speed random access to relocations") \ 2143255736Sdavidch \ 2144255736Sdavidch develop(bool, StressCodeBuffers, false, \ 2145255736Sdavidch "Exercise code buffer expansion and other rare state changes") \ 2146255736Sdavidch \ 2147255736Sdavidch diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 2148255736Sdavidch "Generate extra debugging info for non-safepoints in nmethods") \ 2149255736Sdavidch \ 2150255736Sdavidch diagnostic(bool, DebugInlinedCalls, true, \ 2151255736Sdavidch "If false, restricts profiled locations to the root method only") \ 2152255736Sdavidch \ 2153255736Sdavidch product(bool, PrintVMOptions, trueInDebug, \ 2154255736Sdavidch "print VM flag settings") \ 2155255736Sdavidch \ 2156255736Sdavidch diagnostic(bool, SerializeVMOutput, true, \ 2157255736Sdavidch "Use a mutex to serialize output to tty and hotspot.log") \ 2158255736Sdavidch \ 2159255736Sdavidch diagnostic(bool, DisplayVMOutput, true, \ 2160255736Sdavidch "Display all VM output on the tty, independently of LogVMOutput") \ 2161255736Sdavidch \ 2162255736Sdavidch diagnostic(bool, LogVMOutput, trueInDebug, \ 2163255736Sdavidch "Save VM output to hotspot.log, or to LogFile") \ 2164255736Sdavidch \ 2165255736Sdavidch diagnostic(ccstr, LogFile, NULL, \ 2166255736Sdavidch "If LogVMOutput is on, save VM output to this file [hotspot.log]") \ 2167255736Sdavidch \ 2168255736Sdavidch product(ccstr, ErrorFile, NULL, \ 2169255736Sdavidch "If an error occurs, save the error data to this file " \ 2170255736Sdavidch "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 2171255736Sdavidch \ 2172255736Sdavidch product(bool, DisplayVMOutputToStderr, false, \ 2173255736Sdavidch "If DisplayVMOutput is true, display all VM output to stderr") \ 2174255736Sdavidch \ 2175255736Sdavidch product(bool, DisplayVMOutputToStdout, false, \ 2176255736Sdavidch "If DisplayVMOutput is true, 
display all VM output to stdout") \ 2177255736Sdavidch \ 2178255736Sdavidch product(bool, UseHeavyMonitors, false, \ 2179255736Sdavidch "use heavyweight instead of lightweight Java monitors") \ 2180255736Sdavidch \ 2181255736Sdavidch notproduct(bool, PrintSymbolTableSizeHistogram, false, \ 2182255736Sdavidch "print histogram of the symbol table") \ 2183255736Sdavidch \ 2184255736Sdavidch notproduct(bool, ExitVMOnVerifyError, false, \ 2185255736Sdavidch "standard exit from VM if bytecode verify error " \ 2186255736Sdavidch "(only in debug mode)") \ 2187255736Sdavidch \ 2188255736Sdavidch notproduct(ccstr, AbortVMOnException, NULL, \ 2189255736Sdavidch "Call fatal if this exception is thrown. Example: " \ 2190255736Sdavidch "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ 2191255736Sdavidch \ 2192255736Sdavidch develop(bool, DebugVtables, false, \ 2193255736Sdavidch "add debugging code to vtable dispatch") \ 2194255736Sdavidch \ 2195255736Sdavidch develop(bool, PrintVtables, false, \ 2196255736Sdavidch "print vtables when printing klass") \ 2197255736Sdavidch \ 2198255736Sdavidch notproduct(bool, PrintVtableStats, false, \ 2199255736Sdavidch "print vtables stats at end of run") \ 2200255736Sdavidch \ 2201255736Sdavidch develop(bool, TraceCreateZombies, false, \ 2202255736Sdavidch "trace creation of zombie nmethods") \ 2203255736Sdavidch \ 2204255736Sdavidch notproduct(bool, IgnoreLockingAssertions, false, \ 2205255736Sdavidch "disable locking assertions (for speed)") \ 2206255736Sdavidch \ 2207296580Sdavidcs notproduct(bool, VerifyLoopOptimizations, false, \ 2208258203Sedavis "verify major loop optimizations") \ 2209258203Sedavis \ 2210255736Sdavidch product(bool, RangeCheckElimination, true, \ 2211255736Sdavidch "Split loop iterations to eliminate range checks") \ 2212255736Sdavidch \ 2213255736Sdavidch develop_pd(bool, UncommonNullCast, \ 2214255736Sdavidch "track occurrences of null in casts; adjust compiler tactics") \ 2215255736Sdavidch \ 
2216255736Sdavidch develop(bool, TypeProfileCasts, true, \ 2217255736Sdavidch "treat casts like calls for purposes of type profiling") \ 2218296580Sdavidcs \ 2219296580Sdavidcs develop(bool, MonomorphicArrayCheck, true, \ 2220296580Sdavidcs "Uncommon-trap array store checks that require full type check") \ 2221255736Sdavidch \ 2222255736Sdavidch develop(bool, DelayCompilationDuringStartup, true, \ 2223255736Sdavidch "Delay invoking the compiler until main application class is " \ 2224255736Sdavidch "loaded") \ 2225255736Sdavidch \ 2226255736Sdavidch develop(bool, CompileTheWorld, false, \ 2227255736Sdavidch "Compile all methods in all classes in bootstrap class path " \ 2228255736Sdavidch "(stress test)") \ 2229255736Sdavidch \ 2230255736Sdavidch develop(bool, CompileTheWorldPreloadClasses, true, \ 2231255736Sdavidch "Preload all classes used by a class before start loading") \ 2232255736Sdavidch \ 2233255736Sdavidch notproduct(bool, CompileTheWorldIgnoreInitErrors, false, \ 2234255736Sdavidch "Compile all methods although class initializer failed") \ 2235255736Sdavidch \ 2236255736Sdavidch develop(bool, TraceIterativeGVN, false, \ 2237255736Sdavidch "Print progress during Iterative Global Value Numbering") \ 2238255736Sdavidch \ 2239255736Sdavidch develop(bool, FillDelaySlots, true, \ 2240255736Sdavidch "Fill delay slots (on SPARC only)") \ 2241255736Sdavidch \ 2242255736Sdavidch develop(bool, VerifyIterativeGVN, false, \ 2243255736Sdavidch "Verify Def-Use modifications during sparse Iterative Global " \ 2244255736Sdavidch "Value Numbering") \ 2245255736Sdavidch \ 2246255736Sdavidch notproduct(bool, TracePhaseCCP, false, \ 2247255736Sdavidch "Print progress during Conditional Constant Propagation") \ 2248255736Sdavidch \ 2249255736Sdavidch develop(bool, TimeLivenessAnalysis, false, \ 2250255736Sdavidch "Time computation of bytecode liveness analysis") \ 2251255736Sdavidch \ 2252255736Sdavidch develop(bool, TraceLivenessGen, false, \ 2253255736Sdavidch "Trace the 
generation of liveness analysis information") \ 2254255736Sdavidch \ 2255296580Sdavidcs notproduct(bool, TraceLivenessQuery, false, \ 2256296580Sdavidcs "Trace queries of liveness analysis information") \ 2257296580Sdavidcs \ 2258255736Sdavidch notproduct(bool, CollectIndexSetStatistics, false, \ 2259255736Sdavidch "Collect information about IndexSets") \ 2260255736Sdavidch \ 2261255736Sdavidch develop(bool, PrintDominators, false, \ 2262255736Sdavidch "Print out dominator trees for GVN") \ 2263255736Sdavidch \ 2264255736Sdavidch develop(bool, UseLoopSafepoints, true, \ 2265255736Sdavidch "Generate Safepoint nodes in every loop") \ 2266255736Sdavidch \ 2267255736Sdavidch notproduct(bool, TraceCISCSpill, false, \ 2268255736Sdavidch "Trace allocators use of cisc spillable instructions") \ 2269255736Sdavidch \ 2270255736Sdavidch notproduct(bool, TraceSpilling, false, \ 2271255736Sdavidch "Trace spilling") \ 2272255736Sdavidch \ 2273255736Sdavidch develop(bool, DeutschShiffmanExceptions, true, \ 2274255736Sdavidch "Fast check to find exception handler for precisely typed " \ 2275255736Sdavidch "exceptions") \ 2276255736Sdavidch \ 2277255736Sdavidch product(bool, SplitIfBlocks, true, \ 2278255736Sdavidch "Clone compares and control flow through merge points to fold " \ 2279255736Sdavidch "some branches") \ 2280255736Sdavidch \ 2281255736Sdavidch develop(intx, FastAllocateSizeLimit, 128*K, \ 2282255736Sdavidch /* Note: This value is zero mod 1<<13 for a cheap sparc set. 
*/ \ 2283255736Sdavidch "Inline allocations larger than this in doublewords must go slow")\ 2284255736Sdavidch \ 2285255736Sdavidch product(bool, AggressiveOpts, false, \ 2286255736Sdavidch "Enable aggressive optimizations - see arguments.cpp") \ 2287255736Sdavidch \ 2288255736Sdavidch product(bool, UseStringCache, false, \ 2289255736Sdavidch "Enable String cache capabilities on String.java") \ 2290255736Sdavidch \ 2291255736Sdavidch /* statistics */ \ 2292255736Sdavidch develop(bool, UseVTune, false, \ 2293255736Sdavidch "enable support for Intel's VTune profiler") \ 2294255736Sdavidch \ 2295255736Sdavidch develop(bool, CountCompiledCalls, false, \ 2296255736Sdavidch "counts method invocations") \ 2297255736Sdavidch \ 2298255736Sdavidch notproduct(bool, CountRuntimeCalls, false, \ 2299255736Sdavidch "counts VM runtime calls") \ 2300255736Sdavidch \ 2301255736Sdavidch develop(bool, CountJNICalls, false, \ 2302255736Sdavidch "counts jni method invocations") \ 2303255736Sdavidch \ 2304255736Sdavidch notproduct(bool, CountJVMCalls, false, \ 2305255736Sdavidch "counts jvm method invocations") \ 2306255736Sdavidch \ 2307255736Sdavidch notproduct(bool, CountRemovableExceptions, false, \ 2308255736Sdavidch "count exceptions that could be replaced by branches due to " \ 2309255736Sdavidch "inlining") \ 2310255736Sdavidch \ 2311255736Sdavidch notproduct(bool, ICMissHistogram, false, \ 2312255736Sdavidch "produce histogram of IC misses") \ 2313255736Sdavidch \ 2314255736Sdavidch notproduct(bool, PrintClassStatistics, false, \ 2315255736Sdavidch "prints class statistics at end of run") \ 2316255736Sdavidch \ 2317255736Sdavidch notproduct(bool, PrintMethodStatistics, false, \ 2318255736Sdavidch "prints method statistics at end of run") \ 2319255736Sdavidch \ 2320255736Sdavidch /* interpreter */ \ 2321255736Sdavidch develop(bool, ClearInterpreterLocals, false, \ 2322255736Sdavidch "Always clear local variables of interpreter activations upon " \ 2323255736Sdavidch "entry") \ 
2324255736Sdavidch \ 2325255736Sdavidch product_pd(bool, RewriteBytecodes, \ 2326255736Sdavidch "Allow rewriting of bytecodes (bytecodes are not immutable)") \ 2327255736Sdavidch \ 2328255736Sdavidch product_pd(bool, RewriteFrequentPairs, \ 2329255736Sdavidch "Rewrite frequently used bytecode pairs into a single bytecode") \ 2330255736Sdavidch \ 2331255736Sdavidch diagnostic(bool, PrintInterpreter, false, \ 2332255736Sdavidch "Prints the generated interpreter code") \ 2333255736Sdavidch \ 2334255736Sdavidch product(bool, UseInterpreter, true, \ 2335255736Sdavidch "Use interpreter for non-compiled methods") \ 2336255736Sdavidch \ 2337255736Sdavidch develop(bool, UseFastSignatureHandlers, true, \ 2338255736Sdavidch "Use fast signature handlers for native calls") \ 2339255736Sdavidch \ 2340255736Sdavidch develop(bool, UseV8InstrsOnly, false, \ 2341255736Sdavidch "Use SPARC-V8 Compliant instruction subset") \ 2342255736Sdavidch \ 2343255736Sdavidch product(bool, UseNiagaraInstrs, false, \ 2344255736Sdavidch "Use Niagara-efficient instruction subset") \ 2345255736Sdavidch \ 2346255736Sdavidch develop(bool, UseCASForSwap, false, \ 2347255736Sdavidch "Do not use swap instructions, but only CAS (in a loop) on SPARC")\ 2348255736Sdavidch \ 2349255736Sdavidch product(bool, UseLoopCounter, true, \ 2350255736Sdavidch "Increment invocation counter on backward branch") \ 2351255736Sdavidch \ 2352255736Sdavidch product(bool, UseFastEmptyMethods, true, \ 2353255736Sdavidch "Use fast method entry code for empty methods") \ 2354255736Sdavidch \ 2355255736Sdavidch product(bool, UseFastAccessorMethods, true, \ 2356255736Sdavidch "Use fast method entry code for accessor methods") \ 2357255736Sdavidch \ 2358255736Sdavidch product_pd(bool, UseOnStackReplacement, \ 2359255736Sdavidch "Use on stack replacement, calls runtime if invoc. 
counter " \ 2360255736Sdavidch "overflows in loop") \ 2361255736Sdavidch \ 2362255736Sdavidch notproduct(bool, TraceOnStackReplacement, false, \ 2363255736Sdavidch "Trace on stack replacement") \ 2364255736Sdavidch \ 2365255736Sdavidch develop(bool, PoisonOSREntry, true, \ 2366255736Sdavidch "Detect abnormal calls to OSR code") \ 2367255736Sdavidch \ 2368255736Sdavidch product_pd(bool, PreferInterpreterNativeStubs, \ 2369255736Sdavidch "Use always interpreter stubs for native methods invoked via " \ 2370255736Sdavidch "interpreter") \ 2371255736Sdavidch \ 2372255736Sdavidch develop(bool, CountBytecodes, false, \ 2373255736Sdavidch "Count number of bytecodes executed") \ 2374255736Sdavidch \ 2375255736Sdavidch develop(bool, PrintBytecodeHistogram, false, \ 2376255736Sdavidch "Print histogram of the executed bytecodes") \ 2377255736Sdavidch \ 2378255736Sdavidch develop(bool, PrintBytecodePairHistogram, false, \ 2379255736Sdavidch "Print histogram of the executed bytecode pairs") \ 2380255736Sdavidch \ 2381255736Sdavidch diagnostic(bool, PrintSignatureHandlers, false, \ 2382255736Sdavidch "Print code generated for native method signature handlers") \ 2383255736Sdavidch \ 2384255736Sdavidch develop(bool, VerifyOops, false, \ 2385255736Sdavidch "Do plausibility checks for oops") \ 2386255736Sdavidch \ 2387255736Sdavidch develop(bool, CheckUnhandledOops, false, \ 2388255736Sdavidch "Check for unhandled oops in VM code") \ 2389255736Sdavidch \ 2390255736Sdavidch develop(bool, VerifyJNIFields, trueInDebug, \ 2391255736Sdavidch "Verify jfieldIDs for instance fields") \ 2392255736Sdavidch \ 2393255736Sdavidch notproduct(bool, VerifyJNIEnvThread, false, \ 2394255736Sdavidch "Verify JNIEnv.thread == Thread::current() when entering VM " \ 2395255736Sdavidch "from JNI") \ 2396255736Sdavidch \ 2397255736Sdavidch develop(bool, VerifyFPU, false, \ 2398255736Sdavidch "Verify FPU state (check for NaN's, etc.)") \ 2399255736Sdavidch \ 2400255736Sdavidch develop(bool, VerifyThread, 
false, \ 2401255736Sdavidch "Watch the thread register for corruption (SPARC only)") \ 2402255736Sdavidch \ 2403255736Sdavidch develop(bool, VerifyActivationFrameSize, false, \ 2404255736Sdavidch "Verify that activation frame didn't become smaller than its " \ 2405255736Sdavidch "minimal size") \ 2406255736Sdavidch \ 2407255736Sdavidch develop(bool, TraceFrequencyInlining, false, \ 2408255736Sdavidch "Trace frequency based inlining") \ 2409255736Sdavidch \ 2410255736Sdavidch notproduct(bool, TraceTypeProfile, false, \ 2411255736Sdavidch "Trace type profile") \ 2412255736Sdavidch \ 2413255736Sdavidch develop_pd(bool, InlineIntrinsics, \ 2414255736Sdavidch "Inline intrinsics that can be statically resolved") \ 2415255736Sdavidch \ 2416255736Sdavidch product_pd(bool, ProfileInterpreter, \ 2417255736Sdavidch "Profile at the bytecode level during interpretation") \ 2418255736Sdavidch \ 2419255736Sdavidch develop_pd(bool, ProfileTraps, \ 2420255736Sdavidch "Profile deoptimization traps at the bytecode level") \ 2421255736Sdavidch \ 2422255736Sdavidch product(intx, ProfileMaturityPercentage, 20, \ 2423255736Sdavidch "number of method invocations/branches (expressed as % of " \ 2424255736Sdavidch "CompileThreshold) before using the method's profile") \ 2425255736Sdavidch \ 2426255736Sdavidch develop(bool, PrintMethodData, false, \ 2427255736Sdavidch "Print the results of +ProfileInterpreter at end of run") \ 2428255736Sdavidch \ 2429255736Sdavidch develop(bool, VerifyDataPointer, trueInDebug, \ 2430255736Sdavidch "Verify the method data pointer during interpreter profiling") \ 2431255736Sdavidch \ 2432255736Sdavidch develop(bool, VerifyCompiledCode, false, \ 2433255736Sdavidch "Include miscellaneous runtime verifications in nmethod code; " \ 2434255736Sdavidch "off by default because it disturbs nmethod size heuristics.") \ 2435255736Sdavidch \ 2436255736Sdavidch \ 2437255736Sdavidch /* compilation */ \ 2438255736Sdavidch product(bool, UseCompiler, true, \ 
2439255736Sdavidch "use compilation") \ 2440255736Sdavidch \ 2441255736Sdavidch develop(bool, TraceCompilationPolicy, false, \ 2442255736Sdavidch "Trace compilation policy") \ 2443255736Sdavidch \ 2444255736Sdavidch develop(bool, TimeCompilationPolicy, false, \ 2445255736Sdavidch "Time the compilation policy") \ 2446255736Sdavidch \ 2447255736Sdavidch product(bool, UseCounterDecay, true, \ 2448255736Sdavidch "adjust recompilation counters") \ 2449255736Sdavidch \ 2450255736Sdavidch develop(intx, CounterHalfLifeTime, 30, \ 2451255736Sdavidch "half-life time of invocation counters (in secs)") \ 2452255736Sdavidch \ 2453255736Sdavidch develop(intx, CounterDecayMinIntervalLength, 500, \ 2454255736Sdavidch "Min. ms. between invocation of CounterDecay") \ 2455255736Sdavidch \ 2456255736Sdavidch product(bool, AlwaysCompileLoopMethods, false, \ 2457255736Sdavidch "when using recompilation, never interpret methods " \ 2458255736Sdavidch "containing loops") \ 2459255736Sdavidch \ 2460255736Sdavidch product(bool, DontCompileHugeMethods, true, \ 2461255736Sdavidch "don't compile methods > HugeMethodLimit") \ 2462255736Sdavidch \ 2463255736Sdavidch /* Bytecode escape analysis estimation. 
*/ \ 2464255736Sdavidch product(bool, EstimateArgEscape, true, \ 2465255736Sdavidch "Analyze bytecodes to estimate escape state of arguments") \ 2466255736Sdavidch \ 2467255736Sdavidch product(intx, BCEATraceLevel, 0, \ 2468255736Sdavidch "How much tracing to do of bytecode escape analysis estimates") \ 2469255736Sdavidch \ 2470255736Sdavidch product(intx, MaxBCEAEstimateLevel, 5, \ 2471255736Sdavidch "Maximum number of nested calls that are analyzed by BC EA.") \ 2472255736Sdavidch \ 2473255736Sdavidch product(intx, MaxBCEAEstimateSize, 150, \ 2474255736Sdavidch "Maximum bytecode size of a method to be analyzed by BC EA.") \ 2475255736Sdavidch \ 2476255736Sdavidch product(intx, AllocatePrefetchStyle, 1, \ 2477296580Sdavidcs "0 = no prefetch, " \ 2478296580Sdavidcs "1 = prefetch instructions for each allocation, " \ 2479296580Sdavidcs "2 = use TLAB watermark to gate allocation prefetch") \ 2480296580Sdavidcs \ 2481296580Sdavidcs product(intx, AllocatePrefetchDistance, -1, \ 2482296580Sdavidcs "Distance to prefetch ahead of allocation pointer") \ 2483296580Sdavidcs \ 2484296580Sdavidcs product(intx, AllocatePrefetchLines, 1, \ 2485296580Sdavidcs "Number of lines to prefetch ahead of allocation pointer") \ 2486296580Sdavidcs \ 2487296580Sdavidcs product(intx, AllocatePrefetchStepSize, 16, \ 2488296580Sdavidcs "Step size in bytes of sequential prefetch instructions") \ 2489296580Sdavidcs \ 2490296580Sdavidcs product(intx, AllocatePrefetchInstr, 0, \ 2491296580Sdavidcs "Prefetch instruction to prefetch ahead of allocation pointer") \ 2492296580Sdavidcs \ 2493296580Sdavidcs product(intx, ReadPrefetchInstr, 0, \ 2494296580Sdavidcs "Prefetch instruction to prefetch ahead") \ 2495296580Sdavidcs \ 2496296580Sdavidcs /* deoptimization */ \ 2497296580Sdavidcs develop(bool, TraceDeoptimization, false, \ 2498296580Sdavidcs "Trace deoptimization") \ 2499296580Sdavidcs \ 2500296580Sdavidcs develop(bool, DebugDeoptimization, false, \ 2501296580Sdavidcs "Tracing various information 
while debugging deoptimization") \ 2502296580Sdavidcs \ 2503296580Sdavidcs product(intx, SelfDestructTimer, 0, \ 2504296580Sdavidcs "Will cause VM to terminate after a given time (in minutes) " \ 2505296580Sdavidcs "(0 means off)") \ 2506296580Sdavidcs \ 2507296580Sdavidcs product(intx, MaxJavaStackTraceDepth, 1024, \ 2508296580Sdavidcs "Max. no. of lines in the stack trace for Java exceptions " \ 2509296580Sdavidcs "(0 means all)") \ 2510296580Sdavidcs \ 2511296580Sdavidcs develop(intx, GuaranteedSafepointInterval, 1000, \ 2512296580Sdavidcs "Guarantee a safepoint (at least) every so many milliseconds " \ 2513296580Sdavidcs "(0 means none)") \ 2514296580Sdavidcs \ 2515296580Sdavidcs product(intx, SafepointTimeoutDelay, 10000, \ 2516296580Sdavidcs "Delay in milliseconds for option SafepointTimeout") \ 2517296580Sdavidcs \ 2518296580Sdavidcs product(intx, NmethodSweepFraction, 4, \ 2519296580Sdavidcs "Number of invocations of sweeper to cover all nmethods") \ 2520296580Sdavidcs \ 2521296580Sdavidcs notproduct(intx, MemProfilingInterval, 500, \ 2522296580Sdavidcs "Time between each invocation of the MemProfiler") \ 2523296580Sdavidcs \ 2524296580Sdavidcs develop(intx, MallocCatchPtr, -1, \ 2525255736Sdavidch "Hit breakpoint when mallocing/freeing this pointer") \ 2526255736Sdavidch \ 2527255736Sdavidch notproduct(intx, AssertRepeat, 1, \ 2528255736Sdavidch "number of times to evaluate expression in assert " \ 2529255736Sdavidch "(to estimate overhead); only works with -DUSE_REPEATED_ASSERTS") \ 2530255736Sdavidch \ 2531255736Sdavidch notproduct(ccstrlist, SuppressErrorAt, "", \ 2532255736Sdavidch "List of assertions (file:line) to muzzle") \ 2533255736Sdavidch \ 2534255736Sdavidch notproduct(uintx, HandleAllocationLimit, 1024, \ 2535255736Sdavidch "Threshold for HandleMark allocation when +TraceHandleAllocation "\ 2536255736Sdavidch "is used") \ 2537255736Sdavidch \ 2538255736Sdavidch develop(uintx, TotalHandleAllocationLimit, 1024, \ 2539255736Sdavidch "Threshold 
for total handle allocation when " \ 2540255736Sdavidch "+TraceHandleAllocation is used") \ 2541255736Sdavidch \ 2542255736Sdavidch develop(intx, StackPrintLimit, 100, \ 2543255736Sdavidch "number of stack frames to print in VM-level stack dump") \ 2544255736Sdavidch \ 2545255736Sdavidch notproduct(intx, MaxElementPrintSize, 256, \ 2546255736Sdavidch "maximum number of elements to print") \ 2547255736Sdavidch \ 2548255736Sdavidch notproduct(intx, MaxSubklassPrintSize, 4, \ 2549255736Sdavidch "maximum number of subklasses to print when printing klass") \ 2550255736Sdavidch \ 2551255736Sdavidch develop(intx, MaxInlineLevel, 9, \ 2552255736Sdavidch "maximum number of nested calls that are inlined") \ 2553255736Sdavidch \ 2554255736Sdavidch develop(intx, MaxRecursiveInlineLevel, 1, \ 2555255736Sdavidch "maximum number of nested recursive calls that are inlined") \ 2556255736Sdavidch \ 2557255736Sdavidch develop(intx, InlineSmallCode, 1000, \ 2558255736Sdavidch "Only inline already compiled methods if their code size is " \ 2559255736Sdavidch "less than this") \ 2560255736Sdavidch \ 2561255736Sdavidch product(intx, MaxInlineSize, 35, \ 2562255736Sdavidch "maximum bytecode size of a method to be inlined") \ 2563255736Sdavidch \ 2564255736Sdavidch product_pd(intx, FreqInlineSize, \ 2565255736Sdavidch "maximum bytecode size of a frequent method to be inlined") \ 2566255736Sdavidch \ 2567255736Sdavidch develop(intx, MaxTrivialSize, 6, \ 2568255736Sdavidch "maximum bytecode size of a trivial method to be inlined") \ 2569255736Sdavidch \ 2570255736Sdavidch develop(intx, MinInliningThreshold, 250, \ 2571255736Sdavidch "min. invocation count a method needs to have to be inlined") \ 2572255736Sdavidch \ 2573255736Sdavidch develop(intx, AlignEntryCode, 4, \ 2574255736Sdavidch "aligns entry code to specified value (in bytes)") \ 2575255736Sdavidch \ 2576255736Sdavidch develop(intx, MethodHistogramCutoff, 100, \ 2577255736Sdavidch "cutoff value for method invoc. 
histogram (+CountCalls)") \ 2578255736Sdavidch \ 2579255736Sdavidch develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ 2580255736Sdavidch "# of interpreted methods to show in profile") \ 2581255736Sdavidch \ 2582255736Sdavidch develop(intx, ProfilerNumberOfCompiledMethods, 25, \ 2583255736Sdavidch "# of compiled methods to show in profile") \ 2584255736Sdavidch \ 2585255736Sdavidch develop(intx, ProfilerNumberOfStubMethods, 25, \ 2586255736Sdavidch "# of stub methods to show in profile") \ 2587255736Sdavidch \ 2588255736Sdavidch develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 2589255736Sdavidch "# of runtime stub nodes to show in profile") \ 2590255736Sdavidch \ 2591255736Sdavidch product(intx, ProfileIntervalsTicks, 100, \ 2592255736Sdavidch "# of ticks between printing of interval profile " \ 2593255736Sdavidch "(+ProfileIntervals)") \ 2594255736Sdavidch \ 2595255736Sdavidch notproduct(intx, ScavengeALotInterval, 1, \ 2596255736Sdavidch "Interval between which scavenge will occur with +ScavengeALot") \ 2597255736Sdavidch \ 2598255736Sdavidch notproduct(intx, FullGCALotInterval, 1, \ 2599255736Sdavidch "Interval between which full gc will occur with +FullGCALot") \ 2600255736Sdavidch \ 2601255736Sdavidch notproduct(intx, FullGCALotStart, 0, \ 2602255736Sdavidch "For which invocation to start FullGCAlot") \ 2603255736Sdavidch \ 2604255736Sdavidch notproduct(intx, FullGCALotDummies, 32*K, \ 2605255736Sdavidch "Dummy object allocated with +FullGCALot, forcing all objects " \ 2606255736Sdavidch "to move") \ 2607255736Sdavidch \ 2608255736Sdavidch develop(intx, DontYieldALotInterval, 10, \ 2609255736Sdavidch "Interval between which yields will be dropped (milliseconds)") \ 2610255736Sdavidch \ 2611255736Sdavidch develop(intx, MinSleepInterval, 1, \ 2612255736Sdavidch "Minimum sleep() interval (milliseconds) when " \ 2613255736Sdavidch "ConvertSleepToYield is off (used for SOLARIS)") \ 2614255736Sdavidch \ 2615255736Sdavidch product(intx, EventLogLength, 
2000, \ 2616255736Sdavidch "maximum nof events in event log") \ 2617255736Sdavidch \ 2618255736Sdavidch develop(intx, ProfilerPCTickThreshold, 15, \ 2619255736Sdavidch "Number of ticks in a PC buckets to be a hotspot") \ 2620255736Sdavidch \ 2621255736Sdavidch notproduct(intx, DeoptimizeALotInterval, 5, \ 2622255736Sdavidch "Number of exits until DeoptimizeALot kicks in") \ 2623255736Sdavidch \ 2624255736Sdavidch notproduct(intx, ZombieALotInterval, 5, \ 2625255736Sdavidch "Number of exits until ZombieALot kicks in") \ 2626255736Sdavidch \ 2627255736Sdavidch develop(bool, StressNonEntrant, false, \ 2628255736Sdavidch "Mark nmethods non-entrant at registration") \ 2629255736Sdavidch \ 2630255736Sdavidch diagnostic(intx, MallocVerifyInterval, 0, \ 2631255736Sdavidch "if non-zero, verify C heap after every N calls to " \ 2632255736Sdavidch "malloc/realloc/free") \ 2633255736Sdavidch \ 2634255736Sdavidch diagnostic(intx, MallocVerifyStart, 0, \ 2635255736Sdavidch "if non-zero, start verifying C heap after Nth call to " \ 2636255736Sdavidch "malloc/realloc/free") \ 2637255736Sdavidch \ 2638255736Sdavidch product(intx, TypeProfileWidth, 2, \ 2639255736Sdavidch "number of receiver types to record in call/cast profile") \ 2640255736Sdavidch \ 2641255736Sdavidch develop(intx, BciProfileWidth, 2, \ 2642255736Sdavidch "number of return bci's to record in ret profile") \ 2643255736Sdavidch \ 2644255736Sdavidch product(intx, PerMethodRecompilationCutoff, 400, \ 2645255736Sdavidch "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 2646255736Sdavidch \ 2647255736Sdavidch product(intx, PerBytecodeRecompilationCutoff, 100, \ 2648255736Sdavidch "Per-BCI limit on repeated recompilation (-1=>'Inf')") \ 2649255736Sdavidch \ 2650255736Sdavidch product(intx, PerMethodTrapLimit, 100, \ 2651255736Sdavidch "Limit on traps (of one kind) in a method (includes inlines)") \ 2652255736Sdavidch \ 2653255736Sdavidch product(intx, PerBytecodeTrapLimit, 4, \ 2654255736Sdavidch 
"Limit on traps (of one kind) at a particular BCI") \ 2655255736Sdavidch \ 2656255736Sdavidch develop(intx, FreqCountInvocations, 1, \ 2657255736Sdavidch "Scaling factor for branch frequencies (deprecated)") \ 2658255736Sdavidch \ 2659255736Sdavidch develop(intx, InlineFrequencyRatio, 20, \ 2660255736Sdavidch "Ratio of call site execution to caller method invocation") \ 2661255736Sdavidch \ 2662255736Sdavidch develop_pd(intx, InlineFrequencyCount, \ 2663255736Sdavidch "Count of call site execution necessary to trigger frequent " \ 2664255736Sdavidch "inlining") \ 2665255736Sdavidch \ 2666255736Sdavidch develop(intx, InlineThrowCount, 50, \ 2667255736Sdavidch "Force inlining of interpreted methods that throw this often") \ 2668255736Sdavidch \ 2669255736Sdavidch develop(intx, InlineThrowMaxSize, 200, \ 2670255736Sdavidch "Force inlining of throwing methods smaller than this") \ 2671255736Sdavidch \ 2672255736Sdavidch product(intx, AliasLevel, 3, \ 2673255736Sdavidch "0 for no aliasing, 1 for oop/field/static/array split, " \ 2674255736Sdavidch "2 for class split, 3 for unique instances") \ 2675255736Sdavidch \ 2676255736Sdavidch develop(bool, VerifyAliases, false, \ 2677255736Sdavidch "perform extra checks on the results of alias analysis") \ 2678255736Sdavidch \ 2679255736Sdavidch develop(intx, ProfilerNodeSize, 1024, \ 2680255736Sdavidch "Size in K to allocate for the Profile Nodes of each thread") \ 2681255736Sdavidch \ 2682255736Sdavidch develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \ 2683255736Sdavidch "Number of times to spin wait on a v8 atomic operation lock") \ 2684255736Sdavidch \ 2685255736Sdavidch product(intx, ReadSpinIterations, 100, \ 2686255736Sdavidch "Number of read attempts before a yield (spin inner loop)") \ 2687255736Sdavidch \ 2688255736Sdavidch product_pd(intx, PreInflateSpin, \ 2689255736Sdavidch "Number of times to spin wait before inflation") \ 2690255736Sdavidch \ 2691255736Sdavidch product(intx, PreBlockSpin, 10, \ 
2692255736Sdavidch "Number of times to spin in an inflated lock before going to " \ 2693255736Sdavidch "an OS lock") \ 2694255736Sdavidch \ 2695255736Sdavidch /* gc parameters */ \ 2696258203Sedavis product(uintx, MaxHeapSize, ScaleForWordSize(64*M), \ 2697258203Sedavis "Default maximum size for object heap (in bytes)") \ 2698255736Sdavidch \ 2699255736Sdavidch product_pd(uintx, NewSize, \ 2700255736Sdavidch "Default size of new generation (in bytes)") \ 2701255736Sdavidch \ 2702255736Sdavidch product(uintx, MaxNewSize, max_uintx, \ 2703255736Sdavidch "Maximum size of new generation (in bytes)") \ 2704255736Sdavidch \ 2705255736Sdavidch product(uintx, PretenureSizeThreshold, 0, \ 2706255736Sdavidch "Max size in bytes of objects allocated in DefNew generation") \ 2707255736Sdavidch \ 2708255736Sdavidch product_pd(uintx, TLABSize, \ 2709258203Sedavis "Default (or starting) size of TLAB (in bytes)") \ 2710258203Sedavis \ 2711255736Sdavidch product(uintx, MinTLABSize, 2*K, \ 2712255736Sdavidch "Minimum allowed TLAB size (in bytes)") \ 2713255736Sdavidch \ 2714255736Sdavidch product(uintx, TLABAllocationWeight, 35, \ 2715255736Sdavidch "Allocation averaging weight") \ 2716255736Sdavidch \ 2717255736Sdavidch product(uintx, TLABWasteTargetPercent, 1, \ 2718255736Sdavidch "Percentage of Eden that can be wasted") \ 2719255736Sdavidch \ 2720255736Sdavidch product(uintx, TLABRefillWasteFraction, 64, \ 2721255736Sdavidch "Max TLAB waste at a refill (internal fragmentation)") \ 2722255736Sdavidch \ 2723255736Sdavidch product(uintx, TLABWasteIncrement, 4, \ 2724255736Sdavidch "Increment allowed waste at slow allocation") \ 2725255736Sdavidch \ 2726255736Sdavidch product_pd(intx, SurvivorRatio, \ 2727255736Sdavidch "Ratio of eden/survivor space size") \ 2728258203Sedavis \ 2729258203Sedavis product_pd(intx, NewRatio, \ 2730255736Sdavidch "Ratio of new/old generation sizes") \ 2731258203Sedavis \ 2732255736Sdavidch product(uintx, MaxLiveObjectEvacuationRatio, 100, \ 
2733255736Sdavidch "Max percent of eden objects that will be live at scavenge") \ 2734255736Sdavidch \ 2735255736Sdavidch product_pd(uintx, NewSizeThreadIncrease, \ 2736255736Sdavidch "Additional size added to desired new generation size per " \ 2737255736Sdavidch "non-daemon thread (in bytes)") \ 2738255736Sdavidch \ 2739255736Sdavidch product(uintx, OldSize, ScaleForWordSize(4096*K), \ 2740255736Sdavidch "Default size of tenured generation (in bytes)") \ 2741255736Sdavidch \ 2742258203Sedavis product_pd(uintx, PermSize, \ 2743258203Sedavis "Default size of permanent generation (in bytes)") \ 2744255736Sdavidch \ 2745258203Sedavis product_pd(uintx, MaxPermSize, \ 2746255736Sdavidch "Maximum size of permanent generation (in bytes)") \ 2747255736Sdavidch \ 2748255736Sdavidch product(uintx, MinHeapFreeRatio, 40, \ 2749255736Sdavidch "Min percentage of heap free after GC to avoid expansion") \ 2750255736Sdavidch \ 2751255736Sdavidch product(uintx, MaxHeapFreeRatio, 70, \ 2752255736Sdavidch "Max percentage of heap free after GC to avoid shrinking") \ 2753255736Sdavidch \ 2754255736Sdavidch product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 2755255736Sdavidch "Number of milliseconds per MB of free space in the heap") \ 2756255736Sdavidch \ 2757255736Sdavidch product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 2758296580Sdavidcs "Min change in heap space due to GC (in bytes)") \ 2759296580Sdavidcs \ 2760255736Sdavidch product(uintx, MinPermHeapExpansion, ScaleForWordSize(256*K), \ 2761296580Sdavidcs "Min expansion of permanent heap (in bytes)") \ 2762296580Sdavidcs \ 2763255736Sdavidch product(uintx, MaxPermHeapExpansion, ScaleForWordSize(4*M), \ 2764255736Sdavidch "Max expansion of permanent heap without full GC (in bytes)") \ 2765255736Sdavidch \ 2766255736Sdavidch product(intx, QueuedAllocationWarningCount, 0, \ 2767255736Sdavidch "Number of times an allocation that queues behind a GC " \ 2768255736Sdavidch "will retry before printing a warning") \ 
2769255736Sdavidch \ 2770255736Sdavidch diagnostic(uintx, VerifyGCStartAt, 0, \ 2771255736Sdavidch "GC invoke count where +VerifyBefore/AfterGC kicks in") \ 2772255736Sdavidch \ 2773255736Sdavidch diagnostic(intx, VerifyGCLevel, 0, \ 2774255736Sdavidch "Generation level at which to start +VerifyBefore/AfterGC") \ 2775255736Sdavidch \ 2776255736Sdavidch develop(uintx, ExitAfterGCNum, 0, \ 2777255736Sdavidch "If non-zero, exit after this GC.") \ 2778255736Sdavidch \ 2779255736Sdavidch product(intx, MaxTenuringThreshold, 15, \ 2780255736Sdavidch "Maximum value for tenuring threshold") \ 2781255736Sdavidch \ 2782255736Sdavidch product(intx, InitialTenuringThreshold, 7, \ 2783255736Sdavidch "Initial value for tenuring threshold") \ 2784255736Sdavidch \ 2785255736Sdavidch product(intx, TargetSurvivorRatio, 50, \ 2786255736Sdavidch "Desired percentage of survivor space used after scavenge") \ 2787255736Sdavidch \ 2788255736Sdavidch product(intx, MarkSweepDeadRatio, 5, \ 2789255736Sdavidch "Percentage (0-100) of the old gen allowed as dead wood." \ 2790255736Sdavidch "Serial mark sweep treats this as both the min and max value." \ 2791255736Sdavidch "CMS uses this value only if it falls back to mark sweep." \ 2792255736Sdavidch "Par compact uses a variable scale based on the density of the" \ 2793255736Sdavidch "generation and treats this as the max value when the heap is" \ 2794255736Sdavidch "either completely full or completely empty. Par compact also" \ 2795255736Sdavidch "has a smaller default value; see arguments.cpp.") \ 2796255736Sdavidch \ 2797255736Sdavidch product(intx, PermMarkSweepDeadRatio, 20, \ 2798255736Sdavidch "Percentage (0-100) of the perm gen allowed as dead wood." 
\ 2799255736Sdavidch "See MarkSweepDeadRatio for collector-specific comments.") \ 2800255736Sdavidch \ 2801255736Sdavidch product(intx, MarkSweepAlwaysCompactCount, 4, \ 2802255736Sdavidch "How often should we fully compact the heap (ignoring the dead " \ 2803255736Sdavidch "space parameters)") \ 2804255736Sdavidch \ 2805255736Sdavidch product(intx, PrintCMSStatistics, 0, \ 2806255736Sdavidch "Statistics for CMS") \ 2807255736Sdavidch \ 2808255736Sdavidch product(bool, PrintCMSInitiationStatistics, false, \ 2809255736Sdavidch "Statistics for initiating a CMS collection") \ 2810255736Sdavidch \ 2811255736Sdavidch product(intx, PrintFLSStatistics, 0, \ 2812255736Sdavidch "Statistics for CMS' FreeListSpace") \ 2813255736Sdavidch \ 2814255736Sdavidch product(intx, PrintFLSCensus, 0, \ 2815255736Sdavidch "Census for CMS' FreeListSpace") \ 2816255736Sdavidch \ 2817255736Sdavidch develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 2818255736Sdavidch "Delay in ms between expansion and allocation") \ 2819255736Sdavidch \ 2820255736Sdavidch product(intx, DeferThrSuspendLoopCount, 4000, \ 2821255736Sdavidch "(Unstable) Number of times to iterate in safepoint loop " \ 2822255736Sdavidch " before blocking VM threads ") \ 2823255736Sdavidch \ 2824255736Sdavidch product(intx, DeferPollingPageLoopCount, -1, \ 2825255736Sdavidch "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 2826255736Sdavidch "before changing safepoint polling page to RO ") \ 2827255736Sdavidch \ 2828255736Sdavidch product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 2829255736Sdavidch \ 2830255736Sdavidch product(bool, UseDepthFirstScavengeOrder, true, \ 2831255736Sdavidch "true: the scavenge order will be depth-first, " \ 2832255736Sdavidch "false: the scavenge order will be breadth-first") \ 2833255736Sdavidch \ 2834255736Sdavidch product(bool, PSChunkLargeArrays, true, \ 2835255736Sdavidch "true: process large arrays in chunks") \ 2836255736Sdavidch \ 2837255736Sdavidch product(uintx, 
GCDrainStackTargetSize, 64, \ 2838255736Sdavidch "how many entries we'll try to leave on the stack during " \ 2839255736Sdavidch "parallel GC") \ 2840255736Sdavidch \ 2841255736Sdavidch product(intx, DCQBarrierQueueBufferSize, 256, \ 2842255736Sdavidch "Number of elements in a dirty card queue buffer") \ 2843255736Sdavidch \ 2844255736Sdavidch product(intx, DCQBarrierProcessCompletedThreshold, 5, \ 2845255736Sdavidch "Number of completed dirty card buffers to trigger processing.") \ 2846255736Sdavidch \ 2847255736Sdavidch /* stack parameters */ \ 2848255736Sdavidch product_pd(intx, StackYellowPages, \ 2849255736Sdavidch "Number of yellow zone (recoverable overflows) pages") \ 2850255736Sdavidch \ 2851255736Sdavidch product_pd(intx, StackRedPages, \ 2852255736Sdavidch "Number of red zone (unrecoverable overflows) pages") \ 2853255736Sdavidch \ 2854255736Sdavidch product_pd(intx, StackShadowPages, \ 2855255736Sdavidch "Number of shadow zone (for overflow checking) pages" \ 2856255736Sdavidch " this should exceed the depth of the VM and native call stack") \ 2857255736Sdavidch \ 2858255736Sdavidch product_pd(intx, ThreadStackSize, \ 2859255736Sdavidch "Thread Stack Size (in Kbytes)") \ 2860255736Sdavidch \ 2861255736Sdavidch product_pd(intx, VMThreadStackSize, \ 2862255736Sdavidch "Non-Java Thread Stack Size (in Kbytes)") \ 2863255736Sdavidch \ 2864255736Sdavidch product_pd(intx, CompilerThreadStackSize, \ 2865255736Sdavidch "Compiler Thread Stack Size (in Kbytes)") \ 2866255736Sdavidch \ 2867255736Sdavidch develop_pd(uintx, JVMInvokeMethodSlack, \ 2868255736Sdavidch "Stack space (bytes) required for JVM_InvokeMethod to complete") \ 2869255736Sdavidch \ 2870255736Sdavidch product(uintx, ThreadSafetyMargin, 50*M, \ 2871255736Sdavidch "Thread safety margin is used on fixed-stack LinuxThreads (on " \ 2872255736Sdavidch "Linux/x86 only) to prevent heap-stack collision. 
Set to 0 to " \ 2873255736Sdavidch "disable this feature") \ 2874255736Sdavidch \ 2875255736Sdavidch /* code cache parameters */ \ 2876255736Sdavidch develop(uintx, CodeCacheSegmentSize, 64, \ 2877255736Sdavidch "Code cache segment size (in bytes) - smallest unit of " \ 2878255736Sdavidch "allocation") \ 2879255736Sdavidch \ 2880255736Sdavidch develop_pd(intx, CodeEntryAlignment, \ 2881255736Sdavidch "Code entry alignment for generated code (in bytes)") \ 2882255736Sdavidch \ 2883255736Sdavidch product_pd(uintx, InitialCodeCacheSize, \ 2884255736Sdavidch "Initial code cache size (in bytes)") \ 2885255736Sdavidch \ 2886255736Sdavidch product_pd(uintx, ReservedCodeCacheSize, \ 2887258203Sedavis "Reserved code cache size (in bytes) - maximum code cache size") \ 2888255736Sdavidch \ 2889255736Sdavidch product(uintx, CodeCacheMinimumFreeSpace, 500*K, \ 2890255736Sdavidch "When less than X space left, we stop compiling.") \ 2891255736Sdavidch \ 2892255736Sdavidch product_pd(uintx, CodeCacheExpansionSize, \ 2893255736Sdavidch "Code cache expansion size (in bytes)") \ 2894255736Sdavidch \ 2895255736Sdavidch develop_pd(uintx, CodeCacheMinBlockLength, \ 2896255736Sdavidch "Minimum number of segments in a code cache block.") \ 2897255736Sdavidch \ 2898255736Sdavidch notproduct(bool, ExitOnFullCodeCache, false, \ 2899255736Sdavidch "Exit the VM if we fill the code cache.") \ 2900255736Sdavidch \ 2901255736Sdavidch /* interpreter debugging */ \ 2902255736Sdavidch develop(intx, BinarySwitchThreshold, 5, \ 2903255736Sdavidch "Minimal number of lookupswitch entries for rewriting to binary " \ 2904255736Sdavidch "switch") \ 2905255736Sdavidch \ 2906255736Sdavidch develop(intx, StopInterpreterAt, 0, \ 2907255736Sdavidch "Stops interpreter execution at specified bytecode number") \ 2908255736Sdavidch \ 2909255736Sdavidch develop(intx, TraceBytecodesAt, 0, \ 2910255736Sdavidch "Traces bytecodes starting with specified bytecode number") \ 2911255736Sdavidch \ 2912255736Sdavidch /* 
compiler interface */ \ 2913255736Sdavidch develop(intx, CIStart, 0, \ 2914255736Sdavidch "the id of the first compilation to permit") \ 2915255736Sdavidch \ 2916255736Sdavidch develop(intx, CIStop, -1, \ 2917255736Sdavidch "the id of the last compilation to permit") \ 2918255736Sdavidch \ 2919255736Sdavidch develop(intx, CIStartOSR, 0, \ 2920255736Sdavidch "the id of the first osr compilation to permit " \ 2921255736Sdavidch "(CICountOSR must be on)") \ 2922255736Sdavidch \ 2923255736Sdavidch develop(intx, CIStopOSR, -1, \ 2924255736Sdavidch "the id of the last osr compilation to permit " \ 2925255736Sdavidch "(CICountOSR must be on)") \ 2926255736Sdavidch \ 2927255736Sdavidch develop(intx, CIBreakAtOSR, -1, \ 2928255736Sdavidch "id of osr compilation to break at") \ 2929255736Sdavidch \ 2930255736Sdavidch develop(intx, CIBreakAt, -1, \ 2931255736Sdavidch "id of compilation to break at") \ 2932255736Sdavidch \ 2933255736Sdavidch product(ccstrlist, CompileOnly, "", \ 2934255736Sdavidch "List of methods (pkg/class.name) to restrict compilation to") \ 2935255736Sdavidch \ 2936255736Sdavidch product(ccstr, CompileCommandFile, NULL, \ 2937255736Sdavidch "Read compiler commands from this file [.hotspot_compiler]") \ 2938255736Sdavidch \ 2939255736Sdavidch product(ccstrlist, CompileCommand, "", \ 2940255736Sdavidch "Prepend to .hotspot_compiler; e.g. 
log,java/lang/String.<init>") \ 2941255736Sdavidch \ 2942255736Sdavidch product(bool, CICompilerCountPerCPU, false, \ 2943255736Sdavidch "1 compiler thread for log(N CPUs)") \ 2944255736Sdavidch \ 2945255736Sdavidch develop(intx, CIFireOOMAt, -1, \ 2946255736Sdavidch "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ 2947255736Sdavidch "(non-negative value throws OOM after this many CI accesses " \ 2948255736Sdavidch "in each compile)") \ 2949255736Sdavidch \ 2950255736Sdavidch develop(intx, CIFireOOMAtDelay, -1, \ 2951255736Sdavidch "Wait for this many CI accesses to occur in all compiles before " \ 2952255736Sdavidch "beginning to throw OutOfMemoryErrors in each compile") \ 2953255736Sdavidch \ 2954255736Sdavidch /* Priorities */ \ 2955255736Sdavidch product_pd(bool, UseThreadPriorities, "Use native thread priorities") \ 2956255736Sdavidch \ 2957255736Sdavidch product(intx, ThreadPriorityPolicy, 0, \ 2958255736Sdavidch "0 : Normal. "\ 2959255736Sdavidch " VM chooses priorities that are appropriate for normal "\ 2960255736Sdavidch " applications. On Solaris NORM_PRIORITY and above are mapped "\ 2961255736Sdavidch " to normal native priority. Java priorities below NORM_PRIORITY"\ 2962255736Sdavidch " map to lower native priority values. On Windows applications"\ 2963255736Sdavidch " are allowed to use higher native priorities. However, with "\ 2964255736Sdavidch " ThreadPriorityPolicy=0, VM will not use the highest possible"\ 2965255736Sdavidch " native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may "\ 2966255736Sdavidch " interfere with system threads. On Linux thread priorities "\ 2967255736Sdavidch " are ignored because the OS does not support static priority "\ 2968255736Sdavidch " in SCHED_OTHER scheduling class which is the only choice for"\ 2969255736Sdavidch " non-root, non-realtime applications. "\ 2970255736Sdavidch "1 : Aggressive. 
"\ 2971255736Sdavidch " Java thread priorities map over to the entire range of "\ 2972255736Sdavidch " native thread priorities. Higher Java thread priorities map "\ 2973255736Sdavidch " to higher native thread priorities. This policy should be "\ 2974255736Sdavidch " used with care, as sometimes it can cause performance "\ 2975255736Sdavidch " degradation in the application and/or the entire system. On "\ 2976255736Sdavidch " Linux this policy requires root privilege.") \ 2977255736Sdavidch \ 2978255736Sdavidch product(bool, ThreadPriorityVerbose, false, \ 2979255736Sdavidch "print priority changes") \ 2980255736Sdavidch \ 2981255736Sdavidch product(intx, DefaultThreadPriority, -1, \ 2982255736Sdavidch "what native priority threads run at if not specified elsewhere (-1 means no change)") \ 2983255736Sdavidch \ 2984255736Sdavidch product(intx, CompilerThreadPriority, -1, \ 2985255736Sdavidch "what priority should compiler threads run at (-1 means no change)") \ 2986255736Sdavidch \ 2987255736Sdavidch product(intx, VMThreadPriority, -1, \ 2988255736Sdavidch "what priority should VM threads run at (-1 means no change)") \ 2989255736Sdavidch \ 2990255736Sdavidch product(bool, CompilerThreadHintNoPreempt, true, \ 2991255736Sdavidch "(Solaris only) Give compiler threads an extra quanta") \ 2992255736Sdavidch \ 2993255736Sdavidch product(bool, VMThreadHintNoPreempt, false, \ 2994255736Sdavidch "(Solaris only) Give VM thread an extra quanta") \ 2995255736Sdavidch \ 2996255736Sdavidch product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 2997255736Sdavidch product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 2998255736Sdavidch product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 2999255736Sdavidch product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 3000255736Sdavidch product(intx, JavaPriority5_To_OSPriority, -1, "Map Java 
priorities to OS priorities") \ 3001255736Sdavidch product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 3002255736Sdavidch product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 3003255736Sdavidch product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 3004255736Sdavidch product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 3005255736Sdavidch product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \ 3006255736Sdavidch \ 3007255736Sdavidch /* compiler debugging */ \ 3008255736Sdavidch notproduct(intx, CompileTheWorldStartAt, 1, \ 3009255736Sdavidch "First class to consider when using +CompileTheWorld") \ 3010255736Sdavidch \ 3011255736Sdavidch notproduct(intx, CompileTheWorldStopAt, max_jint, \ 3012255736Sdavidch "Last class to consider when using +CompileTheWorld") \ 3013255736Sdavidch \ 3014255736Sdavidch develop(intx, NewCodeParameter, 0, \ 3015255736Sdavidch "Testing Only: Create a dedicated integer parameter before " \ 3016255736Sdavidch "putback") \ 3017255736Sdavidch \ 3018255736Sdavidch /* new oopmap storage allocation */ \ 3019255736Sdavidch develop(intx, MinOopMapAllocation, 8, \ 3020255736Sdavidch "Minimum number of OopMap entries in an OopMapSet") \ 3021255736Sdavidch \ 3022255736Sdavidch /* Background Compilation */ \ 3023255736Sdavidch develop(intx, LongCompileThreshold, 50, \ 3024255736Sdavidch "Used with +TraceLongCompiles") \ 3025255736Sdavidch \ 3026255736Sdavidch product(intx, StarvationMonitorInterval, 200, \ 3027255736Sdavidch "Pause between each check in ms") \ 3028255736Sdavidch \ 3029255736Sdavidch /* recompilation */ \ 3030255736Sdavidch product_pd(intx, CompileThreshold, \ 3031255736Sdavidch "number of interpreted method invocations before (re-)compiling") \ 3032255736Sdavidch \ 3033255736Sdavidch product_pd(intx, BackEdgeThreshold, \ 3034255736Sdavidch "Interpreter Back 
edge threshold at which an OSR compilation is invoked")\ 3035255736Sdavidch \ 3036255736Sdavidch product(intx, Tier1BytecodeLimit, 10, \ 3037255736Sdavidch "Must have at least this many bytecodes before tier1" \ 3038255736Sdavidch "invocation counters are used") \ 3039255736Sdavidch \ 3040255736Sdavidch product_pd(intx, Tier2CompileThreshold, \ 3041255736Sdavidch "threshold at which a tier 2 compilation is invoked") \ 3042255736Sdavidch \ 3043255736Sdavidch product_pd(intx, Tier2BackEdgeThreshold, \ 3044255736Sdavidch "Back edge threshold at which a tier 2 compilation is invoked") \ 3045255736Sdavidch \ 3046255736Sdavidch product_pd(intx, Tier3CompileThreshold, \ 3047255736Sdavidch "threshold at which a tier 3 compilation is invoked") \ 3048255736Sdavidch \ 3049255736Sdavidch product_pd(intx, Tier3BackEdgeThreshold, \ 3050255736Sdavidch "Back edge threshold at which a tier 3 compilation is invoked") \ 3051255736Sdavidch \ 3052255736Sdavidch product_pd(intx, Tier4CompileThreshold, \ 3053255736Sdavidch "threshold at which a tier 4 compilation is invoked") \ 3054255736Sdavidch \ 3055255736Sdavidch product_pd(intx, Tier4BackEdgeThreshold, \ 3056255736Sdavidch "Back edge threshold at which a tier 4 compilation is invoked") \ 3057255736Sdavidch \ 3058255736Sdavidch product_pd(bool, TieredCompilation, \ 3059255736Sdavidch "Enable two-tier compilation") \ 3060255736Sdavidch \ 3061255736Sdavidch product(bool, StressTieredRuntime, false, \ 3062255736Sdavidch "Alternate client and server compiler on compile requests") \ 3063255736Sdavidch \ 3064255736Sdavidch product_pd(intx, OnStackReplacePercentage, \ 3065255736Sdavidch "NON_TIERED number of method invocations/branches (expressed as %"\ 3066255736Sdavidch "of CompileThreshold) before (re-)compiling OSR code") \ 3067255736Sdavidch \ 3068255736Sdavidch product(intx, InterpreterProfilePercentage, 33, \ 3069255736Sdavidch "NON_TIERED number of method invocations/branches (expressed as %"\ 3070255736Sdavidch "of 
CompileThreshold) before profiling in the interpreter") \ 3071255736Sdavidch \ 3072255736Sdavidch develop(intx, MaxRecompilationSearchLength, 10, \ 3073255736Sdavidch "max. # frames to inspect searching for recompilee") \ 3074255736Sdavidch \ 3075255736Sdavidch develop(intx, MaxInterpretedSearchLength, 3, \ 3076255736Sdavidch "max. # interp. frames to skip when searching for recompilee") \ 3077255736Sdavidch \ 3078255736Sdavidch develop(intx, DesiredMethodLimit, 8000, \ 3079255736Sdavidch "desired max. method size (in bytecodes) after inlining") \ 3080255736Sdavidch \ 3081255736Sdavidch develop(intx, HugeMethodLimit, 8000, \ 3082255736Sdavidch "don't compile methods larger than this if " \ 3083255736Sdavidch "+DontCompileHugeMethods") \ 3084255736Sdavidch \ 3085255736Sdavidch /* New JDK 1.4 reflection implementation */ \ 3086255736Sdavidch \ 3087255736Sdavidch develop(bool, UseNewReflection, true, \ 3088255736Sdavidch "Temporary flag for transition to reflection based on dynamic " \ 3089255736Sdavidch "bytecode generation in 1.4; can no longer be turned off in 1.4 " \ 3090255736Sdavidch "JDK, and is unneeded in 1.3 JDK, but marks most places VM " \ 3091255736Sdavidch "changes were needed") \ 3092255736Sdavidch \ 3093255736Sdavidch develop(bool, VerifyReflectionBytecodes, false, \ 3094255736Sdavidch "Force verification of 1.4 reflection bytecodes. Does not work " \ 3095255736Sdavidch "in situations like that described in 4486457 or for " \ 3096255736Sdavidch "constructors generated for serialization, so can not be enabled "\ 3097255736Sdavidch "in product.") \ 3098255736Sdavidch \ 3099255736Sdavidch product(bool, ReflectionWrapResolutionErrors, true, \ 3100255736Sdavidch "Temporary flag for transition to AbstractMethodError wrapped " \ 3101255736Sdavidch "in InvocationTargetException. 
See 6531596") \ 3102255736Sdavidch \ 3103255736Sdavidch \ 3104255736Sdavidch develop(intx, FastSuperclassLimit, 8, \ 3105255736Sdavidch "Depth of hardwired instanceof accelerator array") \ 3106255736Sdavidch \ 3107255736Sdavidch /* Properties for Java libraries */ \ 3108255736Sdavidch \ 3109255736Sdavidch product(intx, MaxDirectMemorySize, -1, \ 3110255736Sdavidch "Maximum total size of NIO direct-buffer allocations") \ 3111255736Sdavidch \ 3112255736Sdavidch /* temporary developer defined flags */ \ 3113255736Sdavidch \ 3114255736Sdavidch diagnostic(bool, UseNewCode, false, \ 3115255736Sdavidch "Testing Only: Use the new version while testing") \ 3116255736Sdavidch \ 3117255736Sdavidch diagnostic(bool, UseNewCode2, false, \ 3118255736Sdavidch "Testing Only: Use the new version while testing") \ 3119255736Sdavidch \ 3120255736Sdavidch diagnostic(bool, UseNewCode3, false, \ 3121255736Sdavidch "Testing Only: Use the new version while testing") \ 3122255736Sdavidch \ 3123255736Sdavidch /* flags for performance data collection */ \ 3124255736Sdavidch \ 3125255736Sdavidch product(bool, UsePerfData, true, \ 3126255736Sdavidch "Flag to disable jvmstat instrumentation for performance testing" \ 3127255736Sdavidch "and problem isolation purposes.") \ 3128255736Sdavidch \ 3129255736Sdavidch product(bool, PerfDataSaveToFile, false, \ 3130255736Sdavidch "Save PerfData memory to hsperfdata_<pid> file on exit") \ 3131255736Sdavidch \ 3132255736Sdavidch product(ccstr, PerfDataSaveFile, NULL, \ 3133255736Sdavidch "Save PerfData memory to the specified absolute pathname," \ 3134255736Sdavidch "%p in the file name if present will be replaced by pid") \ 3135255736Sdavidch \ 3136255736Sdavidch product(intx, PerfDataSamplingInterval, 50 /*ms*/, \ 3137255736Sdavidch "Data sampling interval in milliseconds") \ 3138255736Sdavidch \ 3139255736Sdavidch develop(bool, PerfTraceDataCreation, false, \ 3140255736Sdavidch "Trace creation of Performance Data Entries") \ 3141255736Sdavidch \ 
3142255736Sdavidch develop(bool, PerfTraceMemOps, false, \ 3143255736Sdavidch "Trace PerfMemory create/attach/detach calls") \ 3144255736Sdavidch \ 3145255736Sdavidch product(bool, PerfDisableSharedMem, false, \ 3146255736Sdavidch "Store performance data in standard memory") \ 3147255736Sdavidch \ 3148255736Sdavidch product(intx, PerfDataMemorySize, 32*K, \ 3149255736Sdavidch "Size of performance data memory region. Will be rounded " \ 3150255736Sdavidch "up to a multiple of the native os page size.") \ 3151255736Sdavidch \ 3152255736Sdavidch product(intx, PerfMaxStringConstLength, 1024, \ 3153255736Sdavidch "Maximum PerfStringConstant string length before truncation") \ 3154255736Sdavidch \ 3155255736Sdavidch product(bool, PerfAllowAtExitRegistration, false, \ 3156255736Sdavidch "Allow registration of atexit() methods") \ 3157255736Sdavidch \ 3158255736Sdavidch product(bool, PerfBypassFileSystemCheck, false, \ 3159255736Sdavidch "Bypass Win32 file system criteria checks (Windows Only)") \ 3160255736Sdavidch \ 3161255736Sdavidch product(intx, UnguardOnExecutionViolation, 0, \ 3162255736Sdavidch "Unguard page and retry on no-execute fault (Win32 only)" \ 3163255736Sdavidch "0=off, 1=conservative, 2=aggressive") \ 3164255736Sdavidch \ 3165255736Sdavidch /* Serviceability Support */ \ 3166255736Sdavidch \ 3167255736Sdavidch product(bool, ManagementServer, false, \ 3168255736Sdavidch "Create JMX Management Server") \ 3169255736Sdavidch \ 3170255736Sdavidch product(bool, DisableAttachMechanism, false, \ 3171255736Sdavidch "Disable mechanism that allows tools to attach to this VM") \ 3172255736Sdavidch \ 3173255736Sdavidch product(bool, StartAttachListener, false, \ 3174255736Sdavidch "Always start Attach Listener at VM startup") \ 3175255736Sdavidch \ 3176255736Sdavidch manageable(bool, PrintConcurrentLocks, false, \ 3177255736Sdavidch "Print java.util.concurrent locks in thread dump") \ 3178255736Sdavidch \ 3179255736Sdavidch /* Shared spaces */ \ 3180255736Sdavidch \ 
3181255736Sdavidch product(bool, UseSharedSpaces, true, \ 3182255736Sdavidch "Use shared spaces in the permanent generation") \ 3183255736Sdavidch \ 3184255736Sdavidch product(bool, RequireSharedSpaces, false, \ 3185255736Sdavidch "Require shared spaces in the permanent generation") \ 3186255736Sdavidch \ 3187255736Sdavidch product(bool, ForceSharedSpaces, false, \ 3188255736Sdavidch "Require shared spaces in the permanent generation") \ 3189255736Sdavidch \ 3190255736Sdavidch product(bool, DumpSharedSpaces, false, \ 3191255736Sdavidch "Special mode: JVM reads a class list, loads classes, builds " \ 3192255736Sdavidch "shared spaces, and dumps the shared spaces to a file to be " \ 3193255736Sdavidch "used in future JVM runs.") \ 3194255736Sdavidch \ 3195255736Sdavidch product(bool, PrintSharedSpaces, false, \ 3196255736Sdavidch "Print usage of shared spaces") \ 3197255736Sdavidch \ 3198255736Sdavidch product(uintx, SharedDummyBlockSize, 512*M, \ 3199255736Sdavidch "Size of dummy block used to shift heap addresses (in bytes)") \ 3200255736Sdavidch \ 3201255736Sdavidch product(uintx, SharedReadWriteSize, 12*M, \ 3202255736Sdavidch "Size of read-write space in permanent generation (in bytes)") \ 3203255736Sdavidch \ 3204255736Sdavidch product(uintx, SharedReadOnlySize, 8*M, \ 3205255736Sdavidch "Size of read-only space in permanent generation (in bytes)") \ 3206255736Sdavidch \ 3207255736Sdavidch product(uintx, SharedMiscDataSize, 4*M, \ 3208255736Sdavidch "Size of the shared data area adjacent to the heap (in bytes)") \ 3209255736Sdavidch \ 3210255736Sdavidch product(uintx, SharedMiscCodeSize, 4*M, \ 3211255736Sdavidch "Size of the shared code area adjacent to the heap (in bytes)") \ 3212255736Sdavidch \ 3213255736Sdavidch diagnostic(bool, SharedOptimizeColdStart, true, \ 3214255736Sdavidch "At dump time, order shared objects to achieve better " \ 3215255736Sdavidch "cold startup time.") \ 3216255736Sdavidch \ 3217255736Sdavidch develop(intx, 
SharedOptimizeColdStartPolicy, 2, \ 3218255736Sdavidch "Reordering policy for SharedOptimizeColdStart " \ 3219255736Sdavidch "0=favor classload-time locality, 1=balanced, " \ 3220255736Sdavidch "2=favor runtime locality") \ 3221255736Sdavidch \ 3222255736Sdavidch diagnostic(bool, SharedSkipVerify, false, \ 3223255736Sdavidch "Skip assert() and verify() which page-in unwanted shared " \ 3224255736Sdavidch "objects. ") \ 3225255736Sdavidch \ 3226255736Sdavidch product(bool, TaggedStackInterpreter, false, \ 3227255736Sdavidch "Insert tags in interpreter execution stack for oopmap generaion")\ 3228255736Sdavidch \ 3229255736Sdavidch diagnostic(bool, PauseAtStartup, false, \ 3230255736Sdavidch "Causes the VM to pause at startup time and wait for the pause " \ 3231255736Sdavidch "file to be removed (default: ./vm.paused.<pid>)") \ 3232255736Sdavidch \ 3233255736Sdavidch diagnostic(ccstr, PauseAtStartupFile, NULL, \ 3234255736Sdavidch "The file to create and for whose removal to await when pausing " \ 3235255736Sdavidch "at startup. 
(default: ./vm.paused.<pid>)") \ 3236255736Sdavidch \ 3237255736Sdavidch product(bool, ExtendedDTraceProbes, false, \ 3238255736Sdavidch "Enable performance-impacting dtrace probes") \ 3239255736Sdavidch \ 3240255736Sdavidch product(bool, DTraceMethodProbes, false, \ 3241255736Sdavidch "Enable dtrace probes for method-entry and method-exit") \ 3242255736Sdavidch \ 3243255736Sdavidch product(bool, DTraceAllocProbes, false, \ 3244255736Sdavidch "Enable dtrace probes for object allocation") \ 3245255736Sdavidch \ 3246255736Sdavidch product(bool, DTraceMonitorProbes, false, \ 3247255736Sdavidch "Enable dtrace probes for monitor events") \ 3248255736Sdavidch \ 3249255736Sdavidch product(bool, RelaxAccessControlCheck, false, \ 3250255736Sdavidch "Relax the access control checks in the verifier") \ 3251255736Sdavidch \ 3252255736Sdavidch diagnostic(bool, PrintDTraceDOF, false, \ 3253255736Sdavidch "Print the DTrace DOF passed to the system for JSDT probes") \ 3254255736Sdavidch \ 3255255736Sdavidch product(bool, UseVMInterruptibleIO, true, \ 3256255736Sdavidch "(Unstable, Solaris-specific) Thread interrupt before or with " \ 3257255736Sdavidch "EINTR for I/O operations results in OS_INTRPT") 3258255736Sdavidch 3259255736Sdavidch 3260255736Sdavidch/* 3261255736Sdavidch * Macros for factoring of globals 3262255736Sdavidch */ 3263255736Sdavidch 3264255736Sdavidch// Interface macros 3265255736Sdavidch#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; 3266255736Sdavidch#define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name; 3267255736Sdavidch#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name; 3268255736Sdavidch#define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name; 3269255736Sdavidch#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name; 3270255736Sdavidch#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name; 3271255736Sdavidch#ifdef PRODUCT 
3272255736Sdavidch#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) const type name = value; 3273255736Sdavidch#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) const type name = pd_##name; 3274255736Sdavidch#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) 3275255736Sdavidch#else 3276255736Sdavidch#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name; 3277255736Sdavidch#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name; 3278255736Sdavidch#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name; 3279255736Sdavidch#endif 3280255736Sdavidch// Special LP64 flags, product only needed for now. 3281255736Sdavidch#ifdef _LP64 3282255736Sdavidch#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; 3283255736Sdavidch#else 3284255736Sdavidch#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value; 3285255736Sdavidch#endif // _LP64 3286255736Sdavidch 3287255736Sdavidch// Implementation macros 3288255736Sdavidch#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value; 3289255736Sdavidch#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name; 3290255736Sdavidch#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value; 3291255736Sdavidch#define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value; 3292255736Sdavidch#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value; 3293255736Sdavidch#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value; 3294255736Sdavidch#ifdef PRODUCT 3295255736Sdavidch#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */ 3296255736Sdavidch#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) /* flag name is constant */ 3297255736Sdavidch#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) 3298255736Sdavidch#else 3299255736Sdavidch#define MATERIALIZE_DEVELOPER_FLAG(type, 
name, value, doc) type name = value; 3300255736Sdavidch#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name; 3301255736Sdavidch#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value; 3302255736Sdavidch#endif 3303255736Sdavidch#ifdef _LP64 3304255736Sdavidch#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value; 3305255736Sdavidch#else 3306255736Sdavidch#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */ 3307255736Sdavidch#endif // _LP64 3308255736Sdavidch 3309255736SdavidchRUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG, DECLARE_LP64_PRODUCT_FLAG) 3310255736Sdavidch 3311255736SdavidchRUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG) 3312255736Sdavidch