methodData.hpp revision 10159:832fc8bf51cb
1/*
2 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_VM_OOPS_METHODDATAOOP_HPP
26#define SHARE_VM_OOPS_METHODDATAOOP_HPP
27
28#include "interpreter/bytecodes.hpp"
29#include "memory/universe.hpp"
30#include "oops/method.hpp"
31#include "oops/oop.hpp"
32#include "runtime/orderAccess.hpp"
33
34class BytecodeStream;
35class KlassSizeStats;
36
37// The MethodData object collects counts and other profile information
38// during zeroth-tier (interpretive) and first-tier execution.
39// The profile is used later by compilation heuristics.  Some heuristics
40// enable use of aggressive (or "heroic") optimizations.  An aggressive
41// optimization often has a down-side, a corner case that it handles
42// poorly, but which is thought to be rare.  The profile provides
43// evidence of this rarity for a given method or even BCI.  It allows
44// the compiler to back out of the optimization at places where it
45// has historically been a poor choice.  Other heuristics try to use
46// specific information gathered about types observed at a given site.
47//
48// All data in the profile is approximate.  It is expected to be accurate
49// on the whole, but the system expects occasional inaccuracies, due to
50// counter overflow, multiprocessor races during data collection, space
51// limitations, missing MDO blocks, etc.  Bad or missing data will degrade
52// optimization quality but will not affect correctness.  Also, each MDO
53// is marked with its birth-date ("creation_mileage") which can be used
54// to assess the quality ("maturity") of its data.
55//
56// Short (<32-bit) counters are designed to overflow to a known "saturated"
57// state.  Also, certain recorded per-BCI events are given one-bit counters
58// which overflow to a saturated state that applies to all counters at
59// that BCI.  In other words, there is a small lattice which approximates
60// the ideal of an infinite-precision counter for each event at each BCI,
61// and the lattice quickly "bottoms out" in a state where all counters
62// are taken to be indefinitely large.
63//
64// The reader will find many data races in the profile-gathering code, starting
65// with the incrementing of invocation counters.  None of these races harms correct
66// execution of the compiled code.
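//
// As a rough sketch of the saturating bump used by accessors such as
// JumpData::inc_taken() below (the names here are only illustrative):
//
//   uint cnt = counter + 1;
//   if (cnt == 0) cnt--;      // wrapped around: stick at the maximum value
//   counter = cnt;            // racy updates are tolerated, as noted above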
67
68// forward decl
69class ProfileData;
70
71// DataLayout
72//
73// Overlay for generic profiling data.
74class DataLayout VALUE_OBJ_CLASS_SPEC {
75  friend class VMStructs;
76  friend class JVMCIVMStructs;
77
78private:
79  // Every data layout begins with a header.  This header
80  // contains a tag, which is used to indicate the size/layout
81  // of the data, 4 bits of flags, which can be used in any way,
82  // 4 bits of trap history (none/one reason/many reasons),
83  // and a bci, which is used to tie this piece of data to a
84  // specific bci in the bytecodes.
85  union {
86    intptr_t _bits;
87    struct {
88      u1 _tag;
89      u1 _flags;
90      u2 _bci;
91    } _struct;
92  } _header;
93
94  // The data layout has an arbitrary number of cells, each sized
95  // to accommodate a pointer or an integer.
96  intptr_t _cells[1];
97
98  // Some types of data layouts need a length field.
99  static bool needs_array_len(u1 tag);
100
101public:
102  enum {
103    counter_increment = 1
104  };
105
106  enum {
107    cell_size = sizeof(intptr_t)
108  };
109
110  // Tag values
111  enum {
112    no_tag,
113    bit_data_tag,
114    counter_data_tag,
115    jump_data_tag,
116    receiver_type_data_tag,
117    virtual_call_data_tag,
118    ret_data_tag,
119    branch_data_tag,
120    multi_branch_data_tag,
121    arg_info_data_tag,
122    call_type_data_tag,
123    virtual_call_type_data_tag,
124    parameters_type_data_tag,
125    speculative_trap_data_tag
126  };
127
128  enum {
129    // The _struct._flags word is formatted as [trap_state:4 | flags:4].
130    // The trap state breaks down further as [recompile:1 | reason:3].
131    // This further breakdown is defined in deoptimization.cpp.
132    // See Deoptimization::trap_state_reason for an assert that
133    // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
134    //
135    // The trap_state is collected only if ProfileTraps is true.
136    trap_bits = 1+3,  // 3: enough to distinguish [0..Reason_RECORDED_LIMIT].
137    trap_shift = BitsPerByte - trap_bits,
138    trap_mask = right_n_bits(trap_bits),
139    trap_mask_in_place = (trap_mask << trap_shift),
140    flag_limit = trap_shift,
141    flag_mask = right_n_bits(flag_limit),
142    first_flag = 0
143  };
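
  // As a rough illustration of the packing described above (the
  // authoritative encoding and decoding live in Deoptimization; see
  // deoptimization.cpp), a trap_state value in [0..trap_mask] splits into
  // a 3-bit reason history and a 1-bit recompile flag:
  //
  //   int  reason_bits = state & right_n_bits(trap_bits - 1);  // low 3 bits
  //   bool recompiled  = (state >> (trap_bits - 1)) != 0;      // top bit
  //
  // where "state" is the value returned by trap_state() below.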
144
145  // Size computation
146  static int header_size_in_bytes() {
147    return cell_size;
148  }
149  static int header_size_in_cells() {
150    return 1;
151  }
152
153  static int compute_size_in_bytes(int cell_count) {
154    return header_size_in_bytes() + cell_count * cell_size;
155  }
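
  // For example, on a hypothetical 64-bit VM (cell_size == 8) a layout with
  // two cells occupies
  //
  //   compute_size_in_bytes(2) == 8 /*header*/ + 2 * 8 == 24 bytes.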
156
157  // Initialization
158  void initialize(u1 tag, u2 bci, int cell_count);
159
160  // Accessors
161  u1 tag() {
162    return _header._struct._tag;
163  }
164
165  // Return a few bits of trap state.  Range is [0..trap_mask].
166  // The state tells if traps with zero, one, or many reasons have occurred.
167  // It also tells whether zero or many recompilations have occurred.
168  // The associated trap histogram in the MDO itself tells whether
169  // traps are common or not.  If a BCI shows that a trap X has
170  // occurred, and the MDO shows N occurrences of X, we make the
171  // simplifying assumption that all N occurrences can be blamed
172  // on that BCI.
173  int trap_state() const {
174    return ((_header._struct._flags >> trap_shift) & trap_mask);
175  }
176
177  void set_trap_state(int new_state) {
178    assert(ProfileTraps, "used only under +ProfileTraps");
179    uint old_flags = (_header._struct._flags & flag_mask);
180    _header._struct._flags = (new_state << trap_shift) | old_flags;
181  }
182
183  u1 flags() const {
184    return _header._struct._flags;
185  }
186
187  u2 bci() const {
188    return _header._struct._bci;
189  }
190
191  void set_header(intptr_t value) {
192    _header._bits = value;
193  }
194  intptr_t header() {
195    return _header._bits;
196  }
197  void set_cell_at(int index, intptr_t value) {
198    _cells[index] = value;
199  }
200  void release_set_cell_at(int index, intptr_t value) {
201    OrderAccess::release_store_ptr(&_cells[index], value);
202  }
203  intptr_t cell_at(int index) const {
204    return _cells[index];
205  }
206
207  void set_flag_at(int flag_number) {
208    assert(flag_number < flag_limit, "oob");
209    _header._struct._flags |= (0x1 << flag_number);
210  }
211  bool flag_at(int flag_number) const {
212    assert(flag_number < flag_limit, "oob");
213    return (_header._struct._flags & (0x1 << flag_number)) != 0;
214  }
215
216  // Low-level support for code generation.
217  static ByteSize header_offset() {
218    return byte_offset_of(DataLayout, _header);
219  }
220  static ByteSize tag_offset() {
221    return byte_offset_of(DataLayout, _header._struct._tag);
222  }
223  static ByteSize flags_offset() {
224    return byte_offset_of(DataLayout, _header._struct._flags);
225  }
226  static ByteSize bci_offset() {
227    return byte_offset_of(DataLayout, _header._struct._bci);
228  }
229  static ByteSize cell_offset(int index) {
230    return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
231  }
232#ifdef CC_INTERP
233  static int cell_offset_in_bytes(int index) {
234    return (int)offset_of(DataLayout, _cells[index]);
235  }
236#endif // CC_INTERP
237  // Return a value which, when or-ed as a byte into _flags, sets the flag.
238  static int flag_number_to_byte_constant(int flag_number) {
239    assert(0 <= flag_number && flag_number < flag_limit, "oob");
240    DataLayout temp; temp.set_header(0);
241    temp.set_flag_at(flag_number);
242    return temp._header._struct._flags;
243  }
244  // Return a value which, when or-ed as a word into _header, sets the flag.
245  static intptr_t flag_mask_to_header_mask(int byte_constant) {
246    DataLayout temp; temp.set_header(0);
247    temp._header._struct._flags = byte_constant;
248    return temp._header._bits;
249  }
250
251  ProfileData* data_in();
252
253  // GC support
254  void clean_weak_klass_links(BoolObjectClosure* cl);
255
256  // Redefinition support
257  void clean_weak_method_links();
258  DEBUG_ONLY(void verify_clean_weak_method_links();)
259};
260
261
262// ProfileData class hierarchy
263class ProfileData;
264class   BitData;
265class     CounterData;
266class       ReceiverTypeData;
267class         VirtualCallData;
268class           VirtualCallTypeData;
269class       RetData;
270class       CallTypeData;
271class   JumpData;
272class     BranchData;
273class   ArrayData;
274class     MultiBranchData;
275class     ArgInfoData;
276class     ParametersTypeData;
277class   SpeculativeTrapData;
278
279// ProfileData
280//
281// A ProfileData object is created to refer to a section of profiling
282// data in a structured way.
283class ProfileData : public ResourceObj {
284  friend class TypeEntries;
285  friend class ReturnTypeEntry;
286  friend class TypeStackSlotEntries;
287private:
288  enum {
289    tab_width_one = 16,
290    tab_width_two = 36
291  };
292
293  // This is a pointer to a section of profiling data.
294  DataLayout* _data;
295
296  char* print_data_on_helper(const MethodData* md) const;
297
298protected:
299  DataLayout* data() { return _data; }
300  const DataLayout* data() const { return _data; }
301
302  enum {
303    cell_size = DataLayout::cell_size
304  };
305
306public:
307  // How many cells are in this?
308  virtual int cell_count() const {
309    ShouldNotReachHere();
310    return -1;
311  }
312
313  // Return the size of this data.
314  int size_in_bytes() {
315    return DataLayout::compute_size_in_bytes(cell_count());
316  }
317
318protected:
319  // Low-level accessors for underlying data
320  void set_intptr_at(int index, intptr_t value) {
321    assert(0 <= index && index < cell_count(), "oob");
322    data()->set_cell_at(index, value);
323  }
324  void release_set_intptr_at(int index, intptr_t value) {
325    assert(0 <= index && index < cell_count(), "oob");
326    data()->release_set_cell_at(index, value);
327  }
328  intptr_t intptr_at(int index) const {
329    assert(0 <= index && index < cell_count(), "oob");
330    return data()->cell_at(index);
331  }
332  void set_uint_at(int index, uint value) {
333    set_intptr_at(index, (intptr_t) value);
334  }
335  void release_set_uint_at(int index, uint value) {
336    release_set_intptr_at(index, (intptr_t) value);
337  }
338  uint uint_at(int index) const {
339    return (uint)intptr_at(index);
340  }
341  void set_int_at(int index, int value) {
342    set_intptr_at(index, (intptr_t) value);
343  }
344  void release_set_int_at(int index, int value) {
345    release_set_intptr_at(index, (intptr_t) value);
346  }
347  int int_at(int index) const {
348    return (int)intptr_at(index);
349  }
350  int int_at_unchecked(int index) const {
351    return (int)data()->cell_at(index);
352  }
353  void set_oop_at(int index, oop value) {
354    set_intptr_at(index, cast_from_oop<intptr_t>(value));
355  }
356  oop oop_at(int index) const {
357    return cast_to_oop(intptr_at(index));
358  }
359
360  void set_flag_at(int flag_number) {
361    data()->set_flag_at(flag_number);
362  }
363  bool flag_at(int flag_number) const {
364    return data()->flag_at(flag_number);
365  }
366
367  // two convenient imports for use by subclasses:
368  static ByteSize cell_offset(int index) {
369    return DataLayout::cell_offset(index);
370  }
371  static int flag_number_to_byte_constant(int flag_number) {
372    return DataLayout::flag_number_to_byte_constant(flag_number);
373  }
374
375  ProfileData(DataLayout* data) {
376    _data = data;
377  }
378
379#ifdef CC_INTERP
380  // Static low level accessors for DataLayout with ProfileData's semantics.
381
382  static int cell_offset_in_bytes(int index) {
383    return DataLayout::cell_offset_in_bytes(index);
384  }
385
386  static void increment_uint_at_no_overflow(DataLayout* layout, int index,
387                                            int inc = DataLayout::counter_increment) {
388    uint count = ((uint)layout->cell_at(index)) + inc;
389    if (count == 0) return;
390    layout->set_cell_at(index, (intptr_t) count);
391  }
392
393  static int int_at(DataLayout* layout, int index) {
394    return (int)layout->cell_at(index);
395  }
396
397  static int uint_at(DataLayout* layout, int index) {
398    return (uint)layout->cell_at(index);
399  }
400
401  static oop oop_at(DataLayout* layout, int index) {
402    return cast_to_oop(layout->cell_at(index));
403  }
404
405  static void set_intptr_at(DataLayout* layout, int index, intptr_t value) {
406    layout->set_cell_at(index, (intptr_t) value);
407  }
408
409  static void set_flag_at(DataLayout* layout, int flag_number) {
410    layout->set_flag_at(flag_number);
411  }
412#endif // CC_INTERP
413
414public:
415  // Constructor for invalid ProfileData.
416  ProfileData();
417
418  u2 bci() const {
419    return data()->bci();
420  }
421
422  address dp() {
423    return (address)_data;
424  }
425
426  int trap_state() const {
427    return data()->trap_state();
428  }
429  void set_trap_state(int new_state) {
430    data()->set_trap_state(new_state);
431  }
432
433  // Type checking
434  virtual bool is_BitData()         const { return false; }
435  virtual bool is_CounterData()     const { return false; }
436  virtual bool is_JumpData()        const { return false; }
437  virtual bool is_ReceiverTypeData()const { return false; }
438  virtual bool is_VirtualCallData() const { return false; }
439  virtual bool is_RetData()         const { return false; }
440  virtual bool is_BranchData()      const { return false; }
441  virtual bool is_ArrayData()       const { return false; }
442  virtual bool is_MultiBranchData() const { return false; }
443  virtual bool is_ArgInfoData()     const { return false; }
444  virtual bool is_CallTypeData()    const { return false; }
445  virtual bool is_VirtualCallTypeData()const { return false; }
446  virtual bool is_ParametersTypeData() const { return false; }
447  virtual bool is_SpeculativeTrapData()const { return false; }
448
449
450  BitData* as_BitData() const {
451    assert(is_BitData(), "wrong type");
452    return is_BitData()         ? (BitData*)        this : NULL;
453  }
454  CounterData* as_CounterData() const {
455    assert(is_CounterData(), "wrong type");
456    return is_CounterData()     ? (CounterData*)    this : NULL;
457  }
458  JumpData* as_JumpData() const {
459    assert(is_JumpData(), "wrong type");
460    return is_JumpData()        ? (JumpData*)       this : NULL;
461  }
462  ReceiverTypeData* as_ReceiverTypeData() const {
463    assert(is_ReceiverTypeData(), "wrong type");
464    return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
465  }
466  VirtualCallData* as_VirtualCallData() const {
467    assert(is_VirtualCallData(), "wrong type");
468    return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
469  }
470  RetData* as_RetData() const {
471    assert(is_RetData(), "wrong type");
472    return is_RetData()         ? (RetData*)        this : NULL;
473  }
474  BranchData* as_BranchData() const {
475    assert(is_BranchData(), "wrong type");
476    return is_BranchData()      ? (BranchData*)     this : NULL;
477  }
478  ArrayData* as_ArrayData() const {
479    assert(is_ArrayData(), "wrong type");
480    return is_ArrayData()       ? (ArrayData*)      this : NULL;
481  }
482  MultiBranchData* as_MultiBranchData() const {
483    assert(is_MultiBranchData(), "wrong type");
484    return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
485  }
486  ArgInfoData* as_ArgInfoData() const {
487    assert(is_ArgInfoData(), "wrong type");
488    return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
489  }
490  CallTypeData* as_CallTypeData() const {
491    assert(is_CallTypeData(), "wrong type");
492    return is_CallTypeData() ? (CallTypeData*)this : NULL;
493  }
494  VirtualCallTypeData* as_VirtualCallTypeData() const {
495    assert(is_VirtualCallTypeData(), "wrong type");
496    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
497  }
498  ParametersTypeData* as_ParametersTypeData() const {
499    assert(is_ParametersTypeData(), "wrong type");
500    return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
501  }
502  SpeculativeTrapData* as_SpeculativeTrapData() const {
503    assert(is_SpeculativeTrapData(), "wrong type");
504    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
505  }
506
507
508  // Subclass specific initialization
509  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
510
511  // GC support
512  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {}
513
514  // Redefinition support
515  virtual void clean_weak_method_links() {}
516  DEBUG_ONLY(virtual void verify_clean_weak_method_links() {})
517
518  // CI translation: ProfileData can represent both MethodDataOop data
519  // and CIMethodData data. This function is provided for translating
520  // an oop in a ProfileData to the ci equivalent. Generally speaking,
521  // most ProfileData don't require any translation, so we provide the null
522  // translation here, and the required translators are in the ci subclasses.
523  virtual void translate_from(const ProfileData* data) {}
524
525  virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
526    ShouldNotReachHere();
527  }
528
529  void print_data_on(outputStream* st, const MethodData* md) const;
530
531  void print_shared(outputStream* st, const char* name, const char* extra) const;
532  void tab(outputStream* st, bool first = false) const;
533};
534
535// BitData
536//
537// A BitData holds a flag or two in its header.
538class BitData : public ProfileData {
539  friend class VMStructs;
540  friend class JVMCIVMStructs;
541protected:
542  enum {
543    // null_seen:
544    //  saw a null operand (cast/aastore/instanceof)
545      null_seen_flag              = DataLayout::first_flag + 0
546#if INCLUDE_JVMCI
547    // bytecode threw any exception
548    , exception_seen_flag         = null_seen_flag + 1
549#endif
550  };
551  enum { bit_cell_count = 0 };  // no additional data fields needed.
552public:
553  BitData(DataLayout* layout) : ProfileData(layout) {
554  }
555
556  virtual bool is_BitData() const { return true; }
557
558  static int static_cell_count() {
559    return bit_cell_count;
560  }
561
562  virtual int cell_count() const {
563    return static_cell_count();
564  }
565
566  // Accessor
567
568  // The null_seen flag bit is specially known to the interpreter.
569  // Consulting it allows the compiler to avoid setting up null_check traps.
570  bool null_seen()     { return flag_at(null_seen_flag); }
571  void set_null_seen()    { set_flag_at(null_seen_flag); }
572
573#if INCLUDE_JVMCI
574  // true if an exception was thrown at the specific BCI
575  bool exception_seen() { return flag_at(exception_seen_flag); }
576  void set_exception_seen() { set_flag_at(exception_seen_flag); }
577#endif
578
579  // Code generation support
580  static int null_seen_byte_constant() {
581    return flag_number_to_byte_constant(null_seen_flag);
582  }
583
584  static ByteSize bit_data_size() {
585    return cell_offset(bit_cell_count);
586  }
587
588#ifdef CC_INTERP
589  static int bit_data_size_in_bytes() {
590    return cell_offset_in_bytes(bit_cell_count);
591  }
592
593  static void set_null_seen(DataLayout* layout) {
594    set_flag_at(layout, null_seen_flag);
595  }
596
597  static DataLayout* advance(DataLayout* layout) {
598    return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes());
599  }
600#endif // CC_INTERP
601
602  void print_data_on(outputStream* st, const char* extra = NULL) const;
603};
604
605// CounterData
606//
607// A CounterData corresponds to a simple counter.
608class CounterData : public BitData {
609  friend class VMStructs;
610  friend class JVMCIVMStructs;
611protected:
612  enum {
613    count_off,
614    counter_cell_count
615  };
616public:
617  CounterData(DataLayout* layout) : BitData(layout) {}
618
619  virtual bool is_CounterData() const { return true; }
620
621  static int static_cell_count() {
622    return counter_cell_count;
623  }
624
625  virtual int cell_count() const {
626    return static_cell_count();
627  }
628
629  // Direct accessor
630  uint count() const {
631    return uint_at(count_off);
632  }
633
634  // Code generation support
635  static ByteSize count_offset() {
636    return cell_offset(count_off);
637  }
638  static ByteSize counter_data_size() {
639    return cell_offset(counter_cell_count);
640  }
641
642  void set_count(uint count) {
643    set_uint_at(count_off, count);
644  }
645
646#ifdef CC_INTERP
647  static int counter_data_size_in_bytes() {
648    return cell_offset_in_bytes(counter_cell_count);
649  }
650
651  static void increment_count_no_overflow(DataLayout* layout) {
652    increment_uint_at_no_overflow(layout, count_off);
653  }
654
655  // Support decrementing the counter when a checkcast / subtype check fails.
656  static void decrement_count(DataLayout* layout) {
657    increment_uint_at_no_overflow(layout, count_off, -1);
658  }
659
660  static DataLayout* advance(DataLayout* layout) {
661    return (DataLayout*) (((address)layout) + (ssize_t)CounterData::counter_data_size_in_bytes());
662  }
663#endif // CC_INTERP
664
665  void print_data_on(outputStream* st, const char* extra = NULL) const;
666};
667
668// JumpData
669//
670// A JumpData is used to access profiling information for a direct
671// branch.  It is a counter, used for counting the number of branches,
672// plus a data displacement, used for realigning the data pointer to
673// the corresponding target bci.
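//
// The two cells are laid out as [ taken | displacement ].  The displacement
// is a byte offset, so (sketching what advance_taken() in the CC_INTERP
// block below does) following a taken branch amounts to:
//
//   DataLayout* next = (DataLayout*) ((address)layout + displacement);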
674class JumpData : public ProfileData {
675  friend class VMStructs;
676  friend class JVMCIVMStructs;
677protected:
678  enum {
679    taken_off_set,
680    displacement_off_set,
681    jump_cell_count
682  };
683
684  void set_displacement(int displacement) {
685    set_int_at(displacement_off_set, displacement);
686  }
687
688public:
689  JumpData(DataLayout* layout) : ProfileData(layout) {
690    assert(layout->tag() == DataLayout::jump_data_tag ||
691      layout->tag() == DataLayout::branch_data_tag, "wrong type");
692  }
693
694  virtual bool is_JumpData() const { return true; }
695
696  static int static_cell_count() {
697    return jump_cell_count;
698  }
699
700  virtual int cell_count() const {
701    return static_cell_count();
702  }
703
704  // Direct accessor
705  uint taken() const {
706    return uint_at(taken_off_set);
707  }
708
709  void set_taken(uint cnt) {
710    set_uint_at(taken_off_set, cnt);
711  }
712
713  // Saturating counter
714  uint inc_taken() {
715    uint cnt = taken() + 1;
716    // Did we wrap? Will compiler screw us??
717    if (cnt == 0) cnt--;
718    set_uint_at(taken_off_set, cnt);
719    return cnt;
720  }
721
722  int displacement() const {
723    return int_at(displacement_off_set);
724  }
725
726  // Code generation support
727  static ByteSize taken_offset() {
728    return cell_offset(taken_off_set);
729  }
730
731  static ByteSize displacement_offset() {
732    return cell_offset(displacement_off_set);
733  }
734
735#ifdef CC_INTERP
736  static void increment_taken_count_no_overflow(DataLayout* layout) {
737    increment_uint_at_no_overflow(layout, taken_off_set);
738  }
739
740  static DataLayout* advance_taken(DataLayout* layout) {
741    return (DataLayout*) (((address)layout) + (ssize_t)int_at(layout, displacement_off_set));
742  }
743
744  static uint taken_count(DataLayout* layout) {
745    return (uint) uint_at(layout, taken_off_set);
746  }
747#endif // CC_INTERP
748
749  // Specific initialization.
750  void post_initialize(BytecodeStream* stream, MethodData* mdo);
751
752  void print_data_on(outputStream* st, const char* extra = NULL) const;
753};
754
755// Entries in a ProfileData object to record types: an entry can either be
756// none (no profile), unknown (conflicting profile data) or a klass if
757// a single one is seen. Whether a null reference was seen is also
758// recorded. No counter is associated with the type and a single type
759// is tracked (unlike VirtualCallData).
760class TypeEntries {
761
762public:
763
764  // A single cell is used to record information for a type:
765  // - the cell is initialized to 0
766  // - when a type is discovered it is stored in the cell
767  // - bit zero of the cell is used to record whether a null reference
768  // was encountered or not
769  // - bit 1 is set to record a conflict in the type information
770
771  enum {
772    null_seen = 1,
773    type_mask = ~null_seen,
774    type_unknown = 2,
775    status_bits = null_seen | type_unknown,
776    type_klass_mask = ~status_bits
777  };
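
  // For illustration, given some Klass* k observed at a site (Klass
  // pointers are aligned, so the two low status bits are free), a cell
  // might be updated roughly as
  //
  //   cell = with_status(k, cell);   // record k, preserving the status bits
  //   cell |= null_seen;             // additionally, a null was encountered
  //
  // and once conflicting types are seen the type_unknown bit is set, after
  // which valid_klass() returns NULL for that cell.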
778
779  // what to initialize a cell to
780  static intptr_t type_none() {
781    return 0;
782  }
783
784  // null seen = bit 0 set?
785  static bool was_null_seen(intptr_t v) {
786    return (v & null_seen) != 0;
787  }
788
789  // conflicting type information = bit 1 set?
790  static bool is_type_unknown(intptr_t v) {
791    return (v & type_unknown) != 0;
792  }
793
794  // no type information yet = all bits cleared, ignoring bit 0?
795  static bool is_type_none(intptr_t v) {
796    return (v & type_mask) == 0;
797  }
798
799  // recorded type: cell without bits 0 and 1
800  static intptr_t klass_part(intptr_t v) {
801    intptr_t r = v & type_klass_mask;
802    return r;
803  }
804
805  // type recorded
806  static Klass* valid_klass(intptr_t k) {
807    if (!is_type_none(k) &&
808        !is_type_unknown(k)) {
809      Klass* res = (Klass*)klass_part(k);
810      assert(res != NULL, "invalid");
811      return res;
812    } else {
813      return NULL;
814    }
815  }
816
817  static intptr_t with_status(intptr_t k, intptr_t in) {
818    return k | (in & status_bits);
819  }
820
821  static intptr_t with_status(Klass* k, intptr_t in) {
822    return with_status((intptr_t)k, in);
823  }
824
825  static void print_klass(outputStream* st, intptr_t k);
826
827  // GC support
828  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
829
830protected:
831  // ProfileData object these entries are part of
832  ProfileData* _pd;
833  // offset within the ProfileData object where the entries start
834  const int _base_off;
835
836  TypeEntries(int base_off)
837    : _base_off(base_off), _pd(NULL) {}
838
839  void set_intptr_at(int index, intptr_t value) {
840    _pd->set_intptr_at(index, value);
841  }
842
843  intptr_t intptr_at(int index) const {
844    return _pd->intptr_at(index);
845  }
846
847public:
848  void set_profile_data(ProfileData* pd) {
849    _pd = pd;
850  }
851};
852
853// Type entries used for arguments passed at a call and parameters on
854// method entry. 2 cells per entry: one for the type encoded as in
855// TypeEntries and one initialized with the stack slot where the
856// profiled object is to be found so that the interpreter can locate
857// it quickly.
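//
// With the per-entry layout below (2 cells per entry), profiling two
// arguments, for example, lays out the cells as
//
//   [ stack_slot(0) | type(0) | stack_slot(1) | type(1) ]
//
// i.e. stack_slot_local_offset(i) == 2*i and type_local_offset(i) == 2*i + 1.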
858class TypeStackSlotEntries : public TypeEntries {
859
860private:
861  enum {
862    stack_slot_entry,
863    type_entry,
864    per_arg_cell_count
865  };
866
867  // offset of cell for stack slot for entry i within ProfileData object
868  int stack_slot_offset(int i) const {
869    return _base_off + stack_slot_local_offset(i);
870  }
871
872  const int _number_of_entries;
873
874  // offset of cell for type for entry i within ProfileData object
875  int type_offset_in_cells(int i) const {
876    return _base_off + type_local_offset(i);
877  }
878
879public:
880
881  TypeStackSlotEntries(int base_off, int nb_entries)
882    : TypeEntries(base_off), _number_of_entries(nb_entries) {}
883
884  static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
885
886  void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
887
888  int number_of_entries() const { return _number_of_entries; }
889
890  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
891  static int stack_slot_local_offset(int i) {
892    return i * per_arg_cell_count + stack_slot_entry;
893  }
894
895  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
896  static int type_local_offset(int i) {
897    return i * per_arg_cell_count + type_entry;
898  }
899
900  // stack slot for entry i
901  uint stack_slot(int i) const {
902    assert(i >= 0 && i < _number_of_entries, "oob");
903    return _pd->uint_at(stack_slot_offset(i));
904  }
905
906  // set stack slot for entry i
907  void set_stack_slot(int i, uint num) {
908    assert(i >= 0 && i < _number_of_entries, "oob");
909    _pd->set_uint_at(stack_slot_offset(i), num);
910  }
911
912  // type for entry i
913  intptr_t type(int i) const {
914    assert(i >= 0 && i < _number_of_entries, "oob");
915    return _pd->intptr_at(type_offset_in_cells(i));
916  }
917
918  // set type for entry i
919  void set_type(int i, intptr_t k) {
920    assert(i >= 0 && i < _number_of_entries, "oob");
921    _pd->set_intptr_at(type_offset_in_cells(i), k);
922  }
923
924  static ByteSize per_arg_size() {
925    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
926  }
927
928  static int per_arg_count() {
929    return per_arg_cell_count;
930  }
931
932  ByteSize type_offset(int i) const {
933    return DataLayout::cell_offset(type_offset_in_cells(i));
934  }
935
936  // GC support
937  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
938
939  void print_data_on(outputStream* st) const;
940};
941
942// Type entry used for return from a call. A single cell to record the
943// type.
944class ReturnTypeEntry : public TypeEntries {
945
946private:
947  enum {
948    cell_count = 1
949  };
950
951public:
952  ReturnTypeEntry(int base_off)
953    : TypeEntries(base_off) {}
954
955  void post_initialize() {
956    set_type(type_none());
957  }
958
959  intptr_t type() const {
960    return _pd->intptr_at(_base_off);
961  }
962
963  void set_type(intptr_t k) {
964    _pd->set_intptr_at(_base_off, k);
965  }
966
967  static int static_cell_count() {
968    return cell_count;
969  }
970
971  static ByteSize size() {
972    return in_ByteSize(cell_count * DataLayout::cell_size);
973  }
974
975  ByteSize type_offset() {
976    return DataLayout::cell_offset(_base_off);
977  }
978
979  // GC support
980  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
981
982  void print_data_on(outputStream* st) const;
983};
984
985// Entries to collect type information at a call: contains arguments
986// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
987// number of cells. Because the number of cells for the return type is
988// smaller than the number of cells for the type of an argument, the
989// number of cells is used to tell how many arguments are profiled and
990// whether a return value is profiled. See has_arguments() and
991// has_return().
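//
// As a worked example (using the layouts above and below: 2 cells per
// argument, 1 cell for the return type, 1 header cell holding the count):
// profiling two arguments plus the return value stores 2*2 + 1 == 5 in the
// header cell, so has_arguments() sees 5 >= 2, has_return() sees
// 5 % 2 != 0, and number_of_arguments() recovers 5 / 2 == 2.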
992class TypeEntriesAtCall {
993private:
994  static int stack_slot_local_offset(int i) {
995    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
996  }
997
998  static int argument_type_local_offset(int i) {
999    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
1000  }
1001
1002public:
1003
1004  static int header_cell_count() {
1005    return 1;
1006  }
1007
1008  static int cell_count_local_offset() {
1009    return 0;
1010  }
1011
1012  static int compute_cell_count(BytecodeStream* stream);
1013
1014  static void initialize(DataLayout* dl, int base, int cell_count) {
1015    int off = base + cell_count_local_offset();
1016    dl->set_cell_at(off, cell_count - base - header_cell_count());
1017  }
1018
1019  static bool arguments_profiling_enabled();
1020  static bool return_profiling_enabled();
1021
1022  // Code generation support
1023  static ByteSize cell_count_offset() {
1024    return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
1025  }
1026
1027  static ByteSize args_data_offset() {
1028    return in_ByteSize(header_cell_count() * DataLayout::cell_size);
1029  }
1030
1031  static ByteSize stack_slot_offset(int i) {
1032    return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
1033  }
1034
1035  static ByteSize argument_type_offset(int i) {
1036    return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
1037  }
1038
1039  static ByteSize return_only_size() {
1040    return ReturnTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
1041  }
1042
1043};
1044
1045// CallTypeData
1046//
1047// A CallTypeData is used to access profiling information about a
1048// non-virtual call for which we collect type information about arguments
1049// and return value.
1050class CallTypeData : public CounterData {
1051private:
1052  // entries for arguments if any
1053  TypeStackSlotEntries _args;
1054  // entry for return type if any
1055  ReturnTypeEntry _ret;
1056
1057  int cell_count_global_offset() const {
1058    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1059  }
1060
1061  // number of cells not counting the header
1062  int cell_count_no_header() const {
1063    return uint_at(cell_count_global_offset());
1064  }
1065
1066  void check_number_of_arguments(int total) {
1067    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1068  }
1069
1070public:
1071  CallTypeData(DataLayout* layout) :
1072    CounterData(layout),
1073    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1074    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
1075  {
1076    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
1077    // Some compilers (VC++) don't want this passed in member initialization list
1078    _args.set_profile_data(this);
1079    _ret.set_profile_data(this);
1080  }
1081
1082  const TypeStackSlotEntries* args() const {
1083    assert(has_arguments(), "no profiling of arguments");
1084    return &_args;
1085  }
1086
1087  const ReturnTypeEntry* ret() const {
1088    assert(has_return(), "no profiling of return value");
1089    return &_ret;
1090  }
1091
1092  virtual bool is_CallTypeData() const { return true; }
1093
1094  static int static_cell_count() {
1095    return -1;
1096  }
1097
1098  static int compute_cell_count(BytecodeStream* stream) {
1099    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1100  }
1101
1102  static void initialize(DataLayout* dl, int cell_count) {
1103    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
1104  }
1105
1106  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1107
1108  virtual int cell_count() const {
1109    return CounterData::static_cell_count() +
1110      TypeEntriesAtCall::header_cell_count() +
1111      int_at_unchecked(cell_count_global_offset());
1112  }
1113
1114  int number_of_arguments() const {
1115    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1116  }
1117
1118  void set_argument_type(int i, Klass* k) {
1119    assert(has_arguments(), "no arguments!");
1120    intptr_t current = _args.type(i);
1121    _args.set_type(i, TypeEntries::with_status(k, current));
1122  }
1123
1124  void set_return_type(Klass* k) {
1125    assert(has_return(), "no return!");
1126    intptr_t current = _ret.type();
1127    _ret.set_type(TypeEntries::with_status(k, current));
1128  }
1129
1130  // An entry for a return value takes less space than an entry for an
1131  // argument, so if the number of cells is at least the number of cells
1132  // needed for an argument, this object contains type information for
1133  // at least one argument.
1134  bool has_arguments() const {
1135    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1136    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1137    return res;
1138  }
1139
1140  // An entry for a return value takes less space than an entry for an
1141  // argument, so if the remainder of the number of cells divided by
1142  // the number of cells for an argument is not zero, a return value
1143  // is profiled in this object.
1144  bool has_return() const {
1145    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1146    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1147    return res;
1148  }
1149
1150  // Code generation support
1151  static ByteSize args_data_offset() {
1152    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1153  }
1154
1155  ByteSize argument_type_offset(int i) {
1156    return _args.type_offset(i);
1157  }
1158
1159  ByteSize return_type_offset() {
1160    return _ret.type_offset();
1161  }
1162
1163  // GC support
1164  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
1165    if (has_arguments()) {
1166      _args.clean_weak_klass_links(is_alive_closure);
1167    }
1168    if (has_return()) {
1169      _ret.clean_weak_klass_links(is_alive_closure);
1170    }
1171  }
1172
1173  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1174};
1175
1176// ReceiverTypeData
1177//
1178// A ReceiverTypeData is used to access profiling information about a
1179// dynamic type check.  It consists of a counter which counts the total times
1180// that the check is reached, and a series of (Klass*, count) pairs
1181// which are used to store a type profile for the receiver of the check.
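//
// The rows are consecutive (receiver, count) cell pairs following the
// CounterData count, so with TypeProfileWidth == 2, for example, the cells
// look roughly like
//
//   [ count | recv0 | count0 | recv1 | count1 ]
//
// (when JVMCI is included, an extra nonprofiled_count cell sits between
// the count and the first receiver; see the enum below).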
1182class ReceiverTypeData : public CounterData {
1183  friend class VMStructs;
1184  friend class JVMCIVMStructs;
1185protected:
1186  enum {
1187#if INCLUDE_JVMCI
1188    // Description of the different counters
1189    // ReceiverTypeData for instanceof/checkcast/aastore:
1190    //   C1/C2: count is incremented on type overflow and decremented for failed type checks
1191    //   JVMCI: count decremented for failed type checks and nonprofiled_count is incremented on type overflow
1192    //          TODO (chaeubl): in fact, JVMCI should also increment the count for failed type checks to mimic the C1/C2 behavior
1193    // VirtualCallData for invokevirtual/invokeinterface:
1194    //   C1/C2: count is incremented on type overflow
1195    //   JVMCI: count is incremented on type overflow, nonprofiled_count is incremented on method overflow
1196
1197    // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
1198    nonprofiled_count_off_set = counter_cell_count,
1199    receiver0_offset,
1200#else
1201    receiver0_offset = counter_cell_count,
1202#endif
1203    count0_offset,
1204    receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1205  };
1206
1207public:
1208  ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1209    assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1210           layout->tag() == DataLayout::virtual_call_data_tag ||
1211           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1212  }
1213
1214  virtual bool is_ReceiverTypeData() const { return true; }
1215
1216  static int static_cell_count() {
1217    return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
1218  }
1219
1220  virtual int cell_count() const {
1221    return static_cell_count();
1222  }
1223
1224  // Direct accessors
1225  static uint row_limit() {
1226    return TypeProfileWidth;
1227  }
1228  static int receiver_cell_index(uint row) {
1229    return receiver0_offset + row * receiver_type_row_cell_count;
1230  }
1231  static int receiver_count_cell_index(uint row) {
1232    return count0_offset + row * receiver_type_row_cell_count;
1233  }
1234
1235  Klass* receiver(uint row) const {
1236    assert(row < row_limit(), "oob");
1237
1238    Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
1239    assert(recv == NULL || recv->is_klass(), "wrong type");
1240    return recv;
1241  }
1242
1243  void set_receiver(uint row, Klass* k) {
1244    assert((uint)row < row_limit(), "oob");
1245    set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
1246  }
1247
1248  uint receiver_count(uint row) const {
1249    assert(row < row_limit(), "oob");
1250    return uint_at(receiver_count_cell_index(row));
1251  }
1252
1253  void set_receiver_count(uint row, uint count) {
1254    assert(row < row_limit(), "oob");
1255    set_uint_at(receiver_count_cell_index(row), count);
1256  }
1257
1258  void clear_row(uint row) {
1259    assert(row < row_limit(), "oob");
1260    // Clear the total count - the indicator of a polymorphic call site.
1261    // The site may look monomorphic after that, but that allows more
1262    // accurate profiling information because there was an execution
1263    // phase change since the klasses were unloaded.
1264    // If the site is still polymorphic then the MDO will be updated
1265    // to reflect it. But it could be the case that the site becomes
1266    // only bimorphic. Then keeping the total count non-zero would be wrong.
1267    // Even if we treat the site as monomorphic (when it is not) for
1268    // compilation, we will only trap, deoptimize and recompile again
1269    // with an updated MDO after executing the method in the interpreter.
1270    // An additional receiver will be recorded in the cleaned row
1271    // during the next call execution.
1272    //
1273    // Note: our profiling logic works with empty rows in any slot.
1274    // We sort the profiling info (ciCallProfile) for compilation.
1275    //
1276    set_count(0);
1277    set_receiver(row, NULL);
1278    set_receiver_count(row, 0);
1279#if INCLUDE_JVMCI
1280    if (!this->is_VirtualCallData()) {
1281      // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
1282      // must also be reset (see "Description of the different counters" above)
1283      set_nonprofiled_count(0);
1284    }
1285#endif
1286  }
1287
1288  // Code generation support
1289  static ByteSize receiver_offset(uint row) {
1290    return cell_offset(receiver_cell_index(row));
1291  }
1292  static ByteSize receiver_count_offset(uint row) {
1293    return cell_offset(receiver_count_cell_index(row));
1294  }
1295#if INCLUDE_JVMCI
1296  static ByteSize nonprofiled_receiver_count_offset() {
1297    return cell_offset(nonprofiled_count_off_set);
1298  }
1299  uint nonprofiled_count() const {
1300    return uint_at(nonprofiled_count_off_set);
1301  }
1302  void set_nonprofiled_count(uint count) {
1303    set_uint_at(nonprofiled_count_off_set, count);
1304  }
1305#endif // INCLUDE_JVMCI
1306  static ByteSize receiver_type_data_size() {
1307    return cell_offset(static_cell_count());
1308  }
1309
1310  // GC support
1311  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1312
1313#ifdef CC_INTERP
1314  static int receiver_type_data_size_in_bytes() {
1315    return cell_offset_in_bytes(static_cell_count());
1316  }
1317
1318  static Klass *receiver_unchecked(DataLayout* layout, uint row) {
1319    Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row));
1320    return recv;
1321  }
1322
1323  static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) {
1324    const int num_rows = row_limit();
1325    // Receiver already exists?
1326    for (int row = 0; row < num_rows; row++) {
1327      if (receiver_unchecked(layout, row) == rcvr) {
1328        increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
1329        return;
1330      }
1331    }
1332    // New receiver, find a free slot.
1333    for (int row = 0; row < num_rows; row++) {
1334      if (receiver_unchecked(layout, row) == NULL) {
1335        set_intptr_at(layout, receiver_cell_index(row), (intptr_t)rcvr);
1336        increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
1337        return;
1338      }
1339    }
1340    // Receiver did not match any saved receiver and there is no empty row for it.
1341    // Increment total counter to indicate polymorphic case.
1342    increment_count_no_overflow(layout);
1343  }
1344
1345  static DataLayout* advance(DataLayout* layout) {
1346    return (DataLayout*) (((address)layout) + (ssize_t)ReceiverTypeData::receiver_type_data_size_in_bytes());
1347  }
1348#endif // CC_INTERP
1349
1350  void print_receiver_data_on(outputStream* st) const;
1351  void print_data_on(outputStream* st, const char* extra = NULL) const;
1352};
1353
1354// VirtualCallData
1355//
1356// A VirtualCallData is used to access profiling information about a
1357// virtual call.  For now, it has nothing more than a ReceiverTypeData.
1358class VirtualCallData : public ReceiverTypeData {
1359public:
1360  VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
1361    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1362           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1363  }
1364
1365  virtual bool is_VirtualCallData() const { return true; }
1366
1367  static int static_cell_count() {
1368    // At this point we could add more profile state, e.g., for arguments.
1369    // But for now it's the same size as the base record type.
1370    return ReceiverTypeData::static_cell_count() JVMCI_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count);
1371  }
1372
1373  virtual int cell_count() const {
1374    return static_cell_count();
1375  }
1376
1377  // Direct accessors
1378  static ByteSize virtual_call_data_size() {
1379    return cell_offset(static_cell_count());
1380  }
1381
1382#ifdef CC_INTERP
1383  static int virtual_call_data_size_in_bytes() {
1384    return cell_offset_in_bytes(static_cell_count());
1385  }
1386
1387  static DataLayout* advance(DataLayout* layout) {
1388    return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
1389  }
1390#endif // CC_INTERP
1391
1392#if INCLUDE_JVMCI
1393  static ByteSize method_offset(uint row) {
1394    return cell_offset(method_cell_index(row));
1395  }
1396  static ByteSize method_count_offset(uint row) {
1397    return cell_offset(method_count_cell_index(row));
1398  }
1399  static int method_cell_index(uint row) {
1400    return receiver0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
1401  }
1402  static int method_count_cell_index(uint row) {
1403    return count0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
1404  }
1405  static uint method_row_limit() {
1406    return MethodProfileWidth;
1407  }
1408
1409  Method* method(uint row) const {
1410    assert(row < method_row_limit(), "oob");
1411
1412    Method* method = (Method*)intptr_at(method_cell_index(row));
1413    assert(method == NULL || method->is_method(), "must be");
1414    return method;
1415  }
1416
1417  uint method_count(uint row) const {
1418    assert(row < method_row_limit(), "oob");
1419    return uint_at(method_count_cell_index(row));
1420  }
1421
1422  void set_method(uint row, Method* m) {
1423    assert((uint)row < method_row_limit(), "oob");
1424    set_intptr_at(method_cell_index(row), (uintptr_t)m);
1425  }
1426
1427  void set_method_count(uint row, uint count) {
1428    assert(row < method_row_limit(), "oob");
1429    set_uint_at(method_count_cell_index(row), count);
1430  }
1431
1432  void clear_method_row(uint row) {
1433    assert(row < method_row_limit(), "oob");
1434    // Clear total count - indicator of polymorphic call site (see comment for clear_row() in ReceiverTypeData).
1435    set_nonprofiled_count(0);
1436    set_method(row, NULL);
1437    set_method_count(row, 0);
1438  }
1439
1440  // GC support
1441  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1442
1443  // Redefinition support
1444  virtual void clean_weak_method_links();
1445#endif // INCLUDE_JVMCI
1446
1447  void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1448  void print_data_on(outputStream* st, const char* extra = NULL) const;
1449};
1450
1451// VirtualCallTypeData
1452//
1453// A VirtualCallTypeData is used to access profiling information about
1454// a virtual call for which we collect type information about
1455// arguments and return value.
1456class VirtualCallTypeData : public VirtualCallData {
1457private:
1458  // entries for arguments if any
1459  TypeStackSlotEntries _args;
1460  // entry for return type if any
1461  ReturnTypeEntry _ret;
1462
1463  int cell_count_global_offset() const {
1464    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1465  }
1466
1467  // number of cells not counting the header
1468  int cell_count_no_header() const {
1469    return uint_at(cell_count_global_offset());
1470  }
1471
1472  void check_number_of_arguments(int total) {
1473    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1474  }
1475
1476public:
1477  VirtualCallTypeData(DataLayout* layout) :
1478    VirtualCallData(layout),
1479    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1480    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
1481  {
1482    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1483    // Some compilers (VC++) don't want this passed in member initialization list
1484    _args.set_profile_data(this);
1485    _ret.set_profile_data(this);
1486  }
1487
1488  const TypeStackSlotEntries* args() const {
1489    assert(has_arguments(), "no profiling of arguments");
1490    return &_args;
1491  }
1492
1493  const ReturnTypeEntry* ret() const {
1494    assert(has_return(), "no profiling of return value");
1495    return &_ret;
1496  }
1497
1498  virtual bool is_VirtualCallTypeData() const { return true; }
1499
1500  static int static_cell_count() {
1501    return -1;
1502  }
1503
1504  static int compute_cell_count(BytecodeStream* stream) {
1505    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1506  }
1507
1508  static void initialize(DataLayout* dl, int cell_count) {
1509    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
1510  }
1511
1512  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1513
1514  virtual int cell_count() const {
1515    return VirtualCallData::static_cell_count() +
1516      TypeEntriesAtCall::header_cell_count() +
1517      int_at_unchecked(cell_count_global_offset());
1518  }
1519
1520  int number_of_arguments() const {
1521    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1522  }
1523
1524  void set_argument_type(int i, Klass* k) {
1525    assert(has_arguments(), "no arguments!");
1526    intptr_t current = _args.type(i);
1527    _args.set_type(i, TypeEntries::with_status(k, current));
1528  }
1529
1530  void set_return_type(Klass* k) {
1531    assert(has_return(), "no return!");
1532    intptr_t current = _ret.type();
1533    _ret.set_type(TypeEntries::with_status(k, current));
1534  }
1535
1536  // An entry for a return value takes less space than an entry for an
1537  // argument, so if the remainder of the number of cells divided by
1538  // the number of cells for an argument is not zero, a return value
1539  // is profiled in this object.
1540  bool has_return() const {
1541    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1542    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1543    return res;
1544  }
1545
1546  // An entry for a return value takes less space than an entry for an
1547  // argument, so if the number of cells is at least the number of cells
1548  // needed for an argument, this object contains type information for
1549  // at least one argument.
1550  bool has_arguments() const {
1551    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1552    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1553    return res;
1554  }
1555
1556  // Code generation support
1557  static ByteSize args_data_offset() {
1558    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1559  }
1560
1561  ByteSize argument_type_offset(int i) {
1562    return _args.type_offset(i);
1563  }
1564
1565  ByteSize return_type_offset() {
1566    return _ret.type_offset();
1567  }
1568
1569  // GC support
1570  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
1571    ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
1572    if (has_arguments()) {
1573      _args.clean_weak_klass_links(is_alive_closure);
1574    }
1575    if (has_return()) {
1576      _ret.clean_weak_klass_links(is_alive_closure);
1577    }
1578  }
1579
1580  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1581};
1582
1583// RetData
1584//
1585// A RetData is used to access profiling information for a ret bytecode.
1586// It is composed of a count of the number of times that the ret has
1587// been executed, followed by a series of triples of the form
1588// (bci, count, di) which count the number of times that some bci was the
1589// target of the ret and cache a corresponding data displacement.
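//
// Each row occupies three cells after the CounterData count, so with
// BciProfileWidth == 2, for example, the cells look roughly like
//
//   [ count | bci0 | count0 | disp0 | bci1 | count1 | disp1 ]
//
// where the cached displacements let fixup_ret() reposition the
// interpreter's profile data pointer at the ret target.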
1590class RetData : public CounterData {
1591protected:
1592  enum {
1593    bci0_offset = counter_cell_count,
1594    count0_offset,
1595    displacement0_offset,
1596    ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1597  };
1598
1599  void set_bci(uint row, int bci) {
1600    assert((uint)row < row_limit(), "oob");
1601    set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1602  }
1603  void release_set_bci(uint row, int bci) {
1604    assert((uint)row < row_limit(), "oob");
1605    // 'release' when setting the bci acts as a valid flag for other
1606    // threads wrt bci_count and bci_displacement.
1607    release_set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1608  }
1609  void set_bci_count(uint row, uint count) {
1610    assert((uint)row < row_limit(), "oob");
1611    set_uint_at(count0_offset + row * ret_row_cell_count, count);
1612  }
1613  void set_bci_displacement(uint row, int disp) {
1614    set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1615  }
1616
1617public:
1618  RetData(DataLayout* layout) : CounterData(layout) {
1619    assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
1620  }
1621
1622  virtual bool is_RetData() const { return true; }
1623
1624  enum {
1625    no_bci = -1 // value of bci when bci1/2 are not in use.
1626  };
1627
1628  static int static_cell_count() {
1629    return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
1630  }
1631
1632  virtual int cell_count() const {
1633    return static_cell_count();
1634  }
1635
1636  static uint row_limit() {
1637    return BciProfileWidth;
1638  }
1639  static int bci_cell_index(uint row) {
1640    return bci0_offset + row * ret_row_cell_count;
1641  }
1642  static int bci_count_cell_index(uint row) {
1643    return count0_offset + row * ret_row_cell_count;
1644  }
1645  static int bci_displacement_cell_index(uint row) {
1646    return displacement0_offset + row * ret_row_cell_count;
1647  }
1648
1649  // Direct accessors
1650  int bci(uint row) const {
1651    return int_at(bci_cell_index(row));
1652  }
1653  uint bci_count(uint row) const {
1654    return uint_at(bci_count_cell_index(row));
1655  }
1656  int bci_displacement(uint row) const {
1657    return int_at(bci_displacement_cell_index(row));
1658  }
1659
1660  // Interpreter Runtime support
1661  address fixup_ret(int return_bci, MethodData* mdo);
1662
1663  // Code generation support
1664  static ByteSize bci_offset(uint row) {
1665    return cell_offset(bci_cell_index(row));
1666  }
1667  static ByteSize bci_count_offset(uint row) {
1668    return cell_offset(bci_count_cell_index(row));
1669  }
1670  static ByteSize bci_displacement_offset(uint row) {
1671    return cell_offset(bci_displacement_cell_index(row));
1672  }
1673
1674#ifdef CC_INTERP
1675  static DataLayout* advance(MethodData *md, int bci);
1676#endif // CC_INTERP
1677
1678  // Specific initialization.
1679  void post_initialize(BytecodeStream* stream, MethodData* mdo);
1680
1681  void print_data_on(outputStream* st, const char* extra = NULL) const;
1682};
1683
1684// BranchData
1685//
1686// A BranchData is used to access profiling data for a two-way branch.
1687// It consists of taken and not_taken counts as well as a data displacement
1688// for the taken case.
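//
// Illustrative sketch (not part of the VM sources): a consumer of this
// profile, e.g. a compilation heuristic, could combine the two counters into
// a taken probability.  'bd' is an assumed BranchData*; taken() is inherited
// from JumpData.
//
//   float taken_probability(const BranchData* bd) {
//     uint taken     = bd->taken();
//     uint not_taken = bd->not_taken();
//     uint total     = taken + not_taken;
//     return (total == 0) ? 0.5f : (float)taken / (float)total;
//   }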
1689class BranchData : public JumpData {
1690  friend class VMStructs;
1691  friend class JVMCIVMStructs;
1692protected:
1693  enum {
1694    not_taken_off_set = jump_cell_count,
1695    branch_cell_count
1696  };
1697
1698  void set_displacement(int displacement) {
1699    set_int_at(displacement_off_set, displacement);
1700  }
1701
1702public:
1703  BranchData(DataLayout* layout) : JumpData(layout) {
1704    assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
1705  }
1706
1707  virtual bool is_BranchData() const { return true; }
1708
1709  static int static_cell_count() {
1710    return branch_cell_count;
1711  }
1712
1713  virtual int cell_count() const {
1714    return static_cell_count();
1715  }
1716
1717  // Direct accessor
1718  uint not_taken() const {
1719    return uint_at(not_taken_off_set);
1720  }
1721
1722  void set_not_taken(uint cnt) {
1723    set_uint_at(not_taken_off_set, cnt);
1724  }
1725
1726  uint inc_not_taken() {
1727    uint cnt = not_taken() + 1;
1728    // Did we wrap? If so, stay saturated at the maximum instead of wrapping to zero.
1729    if (cnt == 0) cnt--;
1730    set_uint_at(not_taken_off_set, cnt);
1731    return cnt;
1732  }
1733
1734  // Code generation support
1735  static ByteSize not_taken_offset() {
1736    return cell_offset(not_taken_off_set);
1737  }
1738  static ByteSize branch_data_size() {
1739    return cell_offset(branch_cell_count);
1740  }
1741
1742#ifdef CC_INTERP
1743  static int branch_data_size_in_bytes() {
1744    return cell_offset_in_bytes(branch_cell_count);
1745  }
1746
1747  static void increment_not_taken_count_no_overflow(DataLayout* layout) {
1748    increment_uint_at_no_overflow(layout, not_taken_off_set);
1749  }
1750
1751  static DataLayout* advance_not_taken(DataLayout* layout) {
1752    return (DataLayout*) (((address)layout) + (ssize_t)BranchData::branch_data_size_in_bytes());
1753  }
1754#endif // CC_INTERP
1755
1756  // Specific initialization.
1757  void post_initialize(BytecodeStream* stream, MethodData* mdo);
1758
1759  void print_data_on(outputStream* st, const char* extra = NULL) const;
1760};
1761
1762// ArrayData
1763//
1764// An ArrayData is a base class for accessing profiling data which does
1765// not have a statically known size.  It consists of an array length
1766// and an array start.
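//
// Illustrative sketch (not part of the VM sources): the length occupies the
// first cell and the body follows it, so for any ArrayData* 'ad' (an assumed
// name):
//
//   int body_cells = ad->array_len();  // number of cells after the length cell
//   int total      = ad->cell_count(); // body_cells + 1 (length cell included)
//   assert(total == body_cells + 1, "layout of a variable-sized entry");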
1767class ArrayData : public ProfileData {
1768  friend class VMStructs;
1769  friend class JVMCIVMStructs;
1770protected:
1771  friend class DataLayout;
1772
1773  enum {
1774    array_len_off_set,
1775    array_start_off_set
1776  };
1777
1778  uint array_uint_at(int index) const {
1779    int aindex = index + array_start_off_set;
1780    return uint_at(aindex);
1781  }
1782  int array_int_at(int index) const {
1783    int aindex = index + array_start_off_set;
1784    return int_at(aindex);
1785  }
1786  oop array_oop_at(int index) const {
1787    int aindex = index + array_start_off_set;
1788    return oop_at(aindex);
1789  }
1790  void array_set_int_at(int index, int value) {
1791    int aindex = index + array_start_off_set;
1792    set_int_at(aindex, value);
1793  }
1794
1795#ifdef CC_INTERP
1796  // Static low level accessors for DataLayout with ArrayData's semantics.
1797
1798  static void increment_array_uint_at_no_overflow(DataLayout* layout, int index) {
1799    int aindex = index + array_start_off_set;
1800    increment_uint_at_no_overflow(layout, aindex);
1801  }
1802
1803  static int array_int_at(DataLayout* layout, int index) {
1804    int aindex = index + array_start_off_set;
1805    return int_at(layout, aindex);
1806  }
1807#endif // CC_INTERP
1808
1809  // Code generation support for subclasses.
1810  static ByteSize array_element_offset(int index) {
1811    return cell_offset(array_start_off_set + index);
1812  }
1813
1814public:
1815  ArrayData(DataLayout* layout) : ProfileData(layout) {}
1816
1817  virtual bool is_ArrayData() const { return true; }
1818
1819  static int static_cell_count() {
1820    return -1;
1821  }
1822
1823  int array_len() const {
1824    return int_at_unchecked(array_len_off_set);
1825  }
1826
1827  virtual int cell_count() const {
1828    return array_len() + 1;
1829  }
1830
1831  // Code generation support
1832  static ByteSize array_len_offset() {
1833    return cell_offset(array_len_off_set);
1834  }
1835  static ByteSize array_start_offset() {
1836    return cell_offset(array_start_off_set);
1837  }
1838};
1839
1840// MultiBranchData
1841//
1842// A MultiBranchData is used to access profiling information for
1843// a multi-way branch (*switch bytecodes).  It consists of a series
1844// of (count, displacement) pairs, which count the number of times each
1845// case was taken and specify the data displacement for each branch target.
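//
// Illustrative sketch (not part of the VM sources): summing the per-case
// counts together with the default count gives the (approximate) total number
// of times the switch was executed.  'mbd' is an assumed MultiBranchData*.
//
//   uint total_switch_count(const MultiBranchData* mbd) {
//     uint total = mbd->default_count();
//     for (int i = 0; i < mbd->number_of_cases(); i++) {
//       total += mbd->count_at(i);
//     }
//     return total; // approximate, since profile counters may saturate
//   }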
1846class MultiBranchData : public ArrayData {
1847  friend class VMStructs;
1848  friend class JVMCIVMStructs;
1849protected:
1850  enum {
1851    default_count_off_set,
1852    default_disaplacement_off_set,
1853    case_array_start
1854  };
1855  enum {
1856    relative_count_off_set,
1857    relative_displacement_off_set,
1858    per_case_cell_count
1859  };
1860
1861  void set_default_displacement(int displacement) {
1862    array_set_int_at(default_disaplacement_off_set, displacement);
1863  }
1864  void set_displacement_at(int index, int displacement) {
1865    array_set_int_at(case_array_start +
1866                     index * per_case_cell_count +
1867                     relative_displacement_off_set,
1868                     displacement);
1869  }
1870
1871public:
1872  MultiBranchData(DataLayout* layout) : ArrayData(layout) {
1873    assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
1874  }
1875
1876  virtual bool is_MultiBranchData() const { return true; }
1877
1878  static int compute_cell_count(BytecodeStream* stream);
1879
1880  int number_of_cases() const {
1881    int alen = array_len() - 2; // exclude the default case's two cells (count and displacement)
1882    assert(alen % per_case_cell_count == 0, "must be even");
1883    return (alen / per_case_cell_count);
1884  }
1885
1886  uint default_count() const {
1887    return array_uint_at(default_count_off_set);
1888  }
1889  int default_displacement() const {
1890    return array_int_at(default_disaplacement_off_set);
1891  }
1892
1893  uint count_at(int index) const {
1894    return array_uint_at(case_array_start +
1895                         index * per_case_cell_count +
1896                         relative_count_off_set);
1897  }
1898  int displacement_at(int index) const {
1899    return array_int_at(case_array_start +
1900                        index * per_case_cell_count +
1901                        relative_displacement_off_set);
1902  }
1903
1904  // Code generation support
1905  static ByteSize default_count_offset() {
1906    return array_element_offset(default_count_off_set);
1907  }
1908  static ByteSize default_displacement_offset() {
1909    return array_element_offset(default_disaplacement_off_set);
1910  }
1911  static ByteSize case_count_offset(int index) {
1912    return case_array_offset() +
1913           (per_case_size() * index) +
1914           relative_count_offset();
1915  }
1916  static ByteSize case_array_offset() {
1917    return array_element_offset(case_array_start);
1918  }
1919  static ByteSize per_case_size() {
1920    return in_ByteSize(per_case_cell_count) * cell_size;
1921  }
1922  static ByteSize relative_count_offset() {
1923    return in_ByteSize(relative_count_off_set) * cell_size;
1924  }
1925  static ByteSize relative_displacement_offset() {
1926    return in_ByteSize(relative_displacement_off_set) * cell_size;
1927  }
1928
1929#ifdef CC_INTERP
1930  static void increment_count_no_overflow(DataLayout* layout, int index) {
1931    if (index == -1) {
1932      increment_array_uint_at_no_overflow(layout, default_count_off_set);
1933    } else {
1934      increment_array_uint_at_no_overflow(layout, case_array_start +
1935                                                  index * per_case_cell_count +
1936                                                  relative_count_off_set);
1937    }
1938  }
1939
1940  static DataLayout* advance(DataLayout* layout, int index) {
1941    if (index == -1) {
1942      return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, default_disaplacement_off_set));
1943    } else {
1944      return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, case_array_start +
1945                                                                              index * per_case_cell_count +
1946                                                                              relative_displacement_off_set));
1947    }
1948  }
1949#endif // CC_INTERP
1950
1951  // Specific initialization.
1952  void post_initialize(BytecodeStream* stream, MethodData* mdo);
1953
1954  void print_data_on(outputStream* st, const char* extra = NULL) const;
1955};
1956
1957class ArgInfoData : public ArrayData {
1958
1959public:
1960  ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1961    assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1962  }
1963
1964  virtual bool is_ArgInfoData() const { return true; }
1965
1966
1967  int number_of_args() const {
1968    return array_len();
1969  }
1970
1971  uint arg_modified(int arg) const {
1972    return array_uint_at(arg);
1973  }
1974
1975  void set_arg_modified(int arg, uint val) {
1976    array_set_int_at(arg, val);
1977  }
1978
1979  void print_data_on(outputStream* st, const char* extra = NULL) const;
1980};
1981
1982// ParametersTypeData
1983//
1984// A ParametersTypeData is used to access profiling information about
1985// types of parameters to a method.
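//
// Illustrative sketch (not part of the VM sources): each profiled parameter
// contributes a (stack slot, type) pair, so a reader could walk them as
// follows.  'ptd' and 'st' are assumed inputs.
//
//   void print_parameter_slots(const ParametersTypeData* ptd, outputStream* st) {
//     for (int i = 0; i < ptd->number_of_parameters(); i++) {
//       st->print_cr("parameter %d profiled at stack slot %u", i, ptd->stack_slot(i));
//     }
//   }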
1986class ParametersTypeData : public ArrayData {
1987
1988private:
1989  TypeStackSlotEntries _parameters;
1990
1991  static int stack_slot_local_offset(int i) {
1992    assert_profiling_enabled();
1993    return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
1994  }
1995
1996  static int type_local_offset(int i) {
1997    assert_profiling_enabled();
1998    return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
1999  }
2000
2001  static bool profiling_enabled();
2002  static void assert_profiling_enabled() {
2003    assert(profiling_enabled(), "method parameters profiling should be on");
2004  }
2005
2006public:
2007  ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
2008    assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
2009    // Some compilers (VC++) don't want this passed in the member initialization list
2010    _parameters.set_profile_data(this);
2011  }
2012
2013  static int compute_cell_count(Method* m);
2014
2015  virtual bool is_ParametersTypeData() const { return true; }
2016
2017  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
2018
2019  int number_of_parameters() const {
2020    return array_len() / TypeStackSlotEntries::per_arg_count();
2021  }
2022
2023  const TypeStackSlotEntries* parameters() const { return &_parameters; }
2024
2025  uint stack_slot(int i) const {
2026    return _parameters.stack_slot(i);
2027  }
2028
2029  void set_type(int i, Klass* k) {
2030    intptr_t current = _parameters.type(i);
2031    _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
2032  }
2033
2034  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
2035    _parameters.clean_weak_klass_links(is_alive_closure);
2036  }
2037
2038  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
2039
2040  static ByteSize stack_slot_offset(int i) {
2041    return cell_offset(stack_slot_local_offset(i));
2042  }
2043
2044  static ByteSize type_offset(int i) {
2045    return cell_offset(type_local_offset(i));
2046  }
2047};
2048
2049// SpeculativeTrapData
2050//
2051// A SpeculativeTrapData is used to record traps due to type
2052// speculation. It records the root method of the compilation: the fact
2053// that type speculation is wrong in the context of one compilation (of
2054// method1) doesn't mean it's wrong in the context of another one (of
2055// method2). Type speculation could have more or different data in the
2056// context of the compilation of method2, so it's worthwhile to retry an
2057// optimization that failed for the compilation of method1 in the context
2058// of the compilation of method2.
2059// Space for SpeculativeTrapData entries is allocated from the extra
2060// data space in the MDO. If we run out of space, the trap data for
2061// the ProfileData at that bci is updated.
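//
// Illustrative sketch (not part of the VM sources): when deciding whether a
// recorded speculation failure is relevant, a reader could compare the
// recorded method with the root of the current compilation.  'trap' and
// 'compilation_root' are assumed inputs.
//
//   bool failed_for_this_root(const SpeculativeTrapData* trap,
//                             const Method* compilation_root) {
//     return trap->method() == compilation_root;
//   }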
2062class SpeculativeTrapData : public ProfileData {
2063protected:
2064  enum {
2065    speculative_trap_method,
2066    speculative_trap_cell_count
2067  };
2068public:
2069  SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
2070    assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
2071  }
2072
2073  virtual bool is_SpeculativeTrapData() const { return true; }
2074
2075  static int static_cell_count() {
2076    return speculative_trap_cell_count;
2077  }
2078
2079  virtual int cell_count() const {
2080    return static_cell_count();
2081  }
2082
2083  // Direct accessor
2084  Method* method() const {
2085    return (Method*)intptr_at(speculative_trap_method);
2086  }
2087
2088  void set_method(Method* m) {
2089    assert(!m->is_old(), "cannot add old methods");
2090    set_intptr_at(speculative_trap_method, (intptr_t)m);
2091  }
2092
2093  static ByteSize method_offset() {
2094    return cell_offset(speculative_trap_method);
2095  }
2096
2097  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
2098};
2099
2100// MethodData*
2101//
2102// A MethodData* holds information which has been collected about
2103// a method.  Its layout looks like this:
2104//
2105// -----------------------------
2106// | header                    |
2107// | klass                     |
2108// -----------------------------
2109// | method                    |
2110// | size of the MethodData*   |
2111// -----------------------------
2112// | Data entries...           |
2113// |   (variable size)         |
2114// |                           |
2115// .                           .
2116// .                           .
2117// .                           .
2118// |                           |
2119// -----------------------------
2120//
2121// The data entry area is a heterogeneous array of DataLayouts. Each
2122// DataLayout in the array corresponds to a specific bytecode in the
2123// method.  The entries in the array are sorted by the corresponding
2124// bytecode.  Access to the data is via resource-allocated ProfileData objects,
2125// which point to the underlying blocks of DataLayout structures.
2126//
2127// During interpretation, if profiling is enabled, the interpreter
2128// maintains a method data pointer (mdp), which points at the entry
2129// in the array corresponding to the current bci.  In the course of
2130// interpretation, when a bytecode is encountered that has profile data
2131// associated with it, the entry pointed to by mdp is updated, then the
2132// mdp is adjusted to point to the next appropriate DataLayout.  If mdp
2133// is NULL to begin with, the interpreter assumes that the current method
2134// is not (yet) being profiled.
2135//
2136// In MethodData* parlance, "dp" is a "data pointer", the actual address
2137// of a DataLayout element.  A "di" is a "data index", the offset in bytes
2138// from the base of the data entry array.  A "displacement" is the byte offset
2139// in certain ProfileData objects that indicates the amount by which the mdp must be
2140// adjusted in the event of a change in control flow.
2141//
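// Illustrative sketch (not part of the VM sources): the relationship between
// a dp, a di and a bci, expressed with the accessors declared below.  'mdo'
// and 'bci' are assumed inputs.
//
//   address dp = mdo->bci_to_dp(bci); // data pointer of the entry for bci
//   int     di = mdo->dp_to_di(dp);   // the same entry as a byte index
//   assert(di == mdo->bci_to_di(bci), "two routes to the same data index");
//   // When control flow changes (e.g. a branch is taken), the interpreter
//   // adds the displacement recorded in the current entry to the mdp so that
//   // it points at the DataLayout of the next profiled bytecode.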
2142
2143CC_INTERP_ONLY(class BytecodeInterpreter;)
2144class CleanExtraDataClosure;
2145
2146class MethodData : public Metadata {
2147  friend class VMStructs;
2148  friend class JVMCIVMStructs;
2149  CC_INTERP_ONLY(friend class BytecodeInterpreter;)
2150private:
2151  friend class ProfileData;
2152
2153  // Back pointer to the Method*
2154  Method* _method;
2155
2156  // Size of this oop in bytes
2157  int _size;
2158
2159  // Cached hint for bci_to_dp and bci_to_data
2160  int _hint_di;
2161
2162  Mutex _extra_data_lock;
2163
2164  MethodData(const methodHandle& method, int size, TRAPS);
2165public:
2166  static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2167  MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {} // For ciMethodData
2168
2169  bool is_methodData() const volatile { return true; }
2170  void initialize();
2171
2172  // Whole-method sticky bits and flags
2173  enum {
2174    _trap_hist_limit    = 22 JVMCI_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
2175    _trap_hist_mask     = max_jubyte,
2176    _extra_data_count   = 4     // extra DataLayout headers, for trap history
2177  }; // Public flag values
2178private:
2179  uint _nof_decompiles;             // count of all nmethod removals
2180  uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2181  uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2182  union {
2183    intptr_t _align;
2184    u1 _array[_trap_hist_limit];
2185  } _trap_hist;
2186
2187  // Support for interprocedural escape analysis, from Thomas Kotzmann.
2188  intx              _eflags;          // flags on escape information
2189  intx              _arg_local;       // bit set of non-escaping arguments
2190  intx              _arg_stack;       // bit set of stack-allocatable arguments
2191  intx              _arg_returned;    // bit set of returned arguments
2192
2193  int _creation_mileage;              // method mileage at MDO creation
2194
2195  // How many invocations has this MDO seen?
2196  // These counters are used to determine the exact age of the MDO.
2197  // We need them because, with tiered compilation, a method can be
2198  // concurrently executed at different levels.
2199  InvocationCounter _invocation_counter;
2200  // Same for backedges.
2201  InvocationCounter _backedge_counter;
2202  // Counter values at the time profiling started.
2203  int               _invocation_counter_start;
2204  int               _backedge_counter_start;
2205  uint              _tenure_traps;
2206  int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
2207  int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
2208
2209#if INCLUDE_RTM_OPT
2210  // State of RTM code generation during compilation of the method
2211  int               _rtm_state;
2212#endif
2213
2214  // Number of loops and blocks is computed when compiling the first
2215  // time with C1. It is used to determine if method is trivial.
2216  short             _num_loops;
2217  short             _num_blocks;
2218  // Does this method contain anything worth profiling?
2219  enum WouldProfile {unknown, no_profile, profile};
2220  WouldProfile      _would_profile;
2221
2222#if INCLUDE_JVMCI
2223  // Support for HotSpotMethodData.setCompiledIRSize(int)
2224  int               _jvmci_ir_size;
2225#endif
2226
2227  // Size of _data array in bytes.  (Excludes header and extra_data fields.)
2228  int _data_size;
2229
2230  // data index for the area dedicated to parameters: no_parameters (-2) if
2231  // there is no parameter profiling, parameters_uninitialized (-1) until set.
2232  enum { no_parameters = -2, parameters_uninitialized = -1 };
2233  int _parameters_type_data_di;
2234  int parameters_size_in_bytes() const {
2235    ParametersTypeData* param = parameters_type_data();
2236    return param == NULL ? 0 : param->size_in_bytes();
2237  }
2238
2239  // Beginning of the data entries
2240  intptr_t _data[1];
2241
2242  // Helper for size computation
2243  static int compute_data_size(BytecodeStream* stream);
2244  static int bytecode_cell_count(Bytecodes::Code code);
2245  static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2246  enum { no_profile_data = -1, variable_cell_count = -2 };
2247
2248  // Helper for initialization
2249  DataLayout* data_layout_at(int data_index) const {
2250    assert(data_index % sizeof(intptr_t) == 0, "unaligned");
2251    return (DataLayout*) (((address)_data) + data_index);
2252  }
2253
2254  // Initialize an individual data segment.  Returns the size of
2255  // the segment in bytes.
2256  int initialize_data(BytecodeStream* stream, int data_index);
2257
2258  // Helper for data_at
2259  DataLayout* limit_data_position() const {
2260    return data_layout_at(_data_size);
2261  }
2262  bool out_of_bounds(int data_index) const {
2263    return data_index >= data_size();
2264  }
2265
2266  // Give each of the data entries a chance to perform specific
2267  // data initialization.
2268  void post_initialize(BytecodeStream* stream);
2269
2270  // hint accessors
2271  int      hint_di() const  { return _hint_di; }
2272  void set_hint_di(int di)  {
2273    assert(!out_of_bounds(di), "hint_di out of bounds");
2274    _hint_di = di;
2275  }
2276  ProfileData* data_before(int bci) {
2277    // avoid SEGV on this edge case
2278    if (data_size() == 0)
2279      return NULL;
2280    int hint = hint_di();
2281    if (data_layout_at(hint)->bci() <= bci)
2282      return data_at(hint);
2283    return first_data();
2284  }
2285
2286  // What is the index of the first data entry?
2287  int first_di() const { return 0; }
2288
2289  ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
2290  // Find or create an extra ProfileData:
2291  ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
2292
2293  // return the argument info cell
2294  ArgInfoData *arg_info();
2295
2296  enum {
2297    no_type_profile = 0,
2298    type_profile_jsr292 = 1,
2299    type_profile_all = 2
2300  };
2301
2302  static bool profile_jsr292(const methodHandle& m, int bci);
2303  static int profile_arguments_flag();
2304  static bool profile_all_arguments();
2305  static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
2306  static int profile_return_flag();
2307  static bool profile_all_return();
2308  static bool profile_return_for_invoke(const methodHandle& m, int bci);
2309  static int profile_parameters_flag();
2310  static bool profile_parameters_jsr292_only();
2311  static bool profile_all_parameters();
2312
2313  void clean_extra_data(CleanExtraDataClosure* cl);
2314  void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
2315  void verify_extra_data_clean(CleanExtraDataClosure* cl);
2316
2317public:
2318  static int header_size() {
2319    return sizeof(MethodData)/wordSize;
2320  }
2321
2322  // Compute the size of a MethodData* before it is created.
2323  static int compute_allocation_size_in_bytes(const methodHandle& method);
2324  static int compute_allocation_size_in_words(const methodHandle& method);
2325  static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
2326
2327  // Determine if a given bytecode can have profile information.
2328  static bool bytecode_has_profile(Bytecodes::Code code) {
2329    return bytecode_cell_count(code) != no_profile_data;
2330  }
2331
2332  // reset into original state
2333  void init();
2334
2335  // My size
2336  int size_in_bytes() const { return _size; }
2337  int size() const    { return align_metadata_size(align_size_up(_size, BytesPerWord)/BytesPerWord); }
2338#if INCLUDE_SERVICES
2339  void collect_statistics(KlassSizeStats *sz) const;
2340#endif
2341
2342  int      creation_mileage() const  { return _creation_mileage; }
2343  void set_creation_mileage(int x)   { _creation_mileage = x; }
2344
2345  int invocation_count() {
2346    if (invocation_counter()->carry()) {
2347      return InvocationCounter::count_limit;
2348    }
2349    return invocation_counter()->count();
2350  }
2351  int backedge_count() {
2352    if (backedge_counter()->carry()) {
2353      return InvocationCounter::count_limit;
2354    }
2355    return backedge_counter()->count();
2356  }
2357
2358  int invocation_count_start() {
2359    if (invocation_counter()->carry()) {
2360      return 0;
2361    }
2362    return _invocation_counter_start;
2363  }
2364
2365  int backedge_count_start() {
2366    if (backedge_counter()->carry()) {
2367      return 0;
2368    }
2369    return _backedge_counter_start;
2370  }
2371
2372  int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
2373  int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
2374
2375  void reset_start_counters() {
2376    _invocation_counter_start = invocation_count();
2377    _backedge_counter_start = backedge_count();
2378  }
2379
2380  InvocationCounter* invocation_counter()     { return &_invocation_counter; }
2381  InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
2382
2383#if INCLUDE_RTM_OPT
2384  int rtm_state() const {
2385    return _rtm_state;
2386  }
2387  void set_rtm_state(RTMState rstate) {
2388    _rtm_state = (int)rstate;
2389  }
2390  void atomic_set_rtm_state(RTMState rstate) {
2391    Atomic::store((int)rstate, &_rtm_state);
2392  }
2393
2394  static int rtm_state_offset_in_bytes() {
2395    return offset_of(MethodData, _rtm_state);
2396  }
2397#endif
2398
2399  void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
2400  bool would_profile() const                  { return _would_profile != no_profile; }
2401
2402  int num_loops() const                       { return _num_loops;  }
2403  void set_num_loops(int n)                   { _num_loops = n;     }
2404  int num_blocks() const                      { return _num_blocks; }
2405  void set_num_blocks(int n)                  { _num_blocks = n;    }
2406
2407  bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
2408  static int mileage_of(Method* m);
2409
2410  // Support for interprocedural escape analysis, from Thomas Kotzmann.
2411  enum EscapeFlag {
2412    estimated    = 1 << 0,
2413    return_local = 1 << 1,
2414    return_allocated = 1 << 2,
2415    allocated_escapes = 1 << 3,
2416    unknown_modified = 1 << 4
2417  };
2418
2419  intx eflags()                                  { return _eflags; }
2420  intx arg_local()                               { return _arg_local; }
2421  intx arg_stack()                               { return _arg_stack; }
2422  intx arg_returned()                            { return _arg_returned; }
2423  uint arg_modified(int a)                       { ArgInfoData *aid = arg_info();
2424                                                   assert(aid != NULL, "arg_info must not be null");
2425                                                   assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2426                                                   return aid->arg_modified(a); }
2427
2428  void set_eflags(intx v)                        { _eflags = v; }
2429  void set_arg_local(intx v)                     { _arg_local = v; }
2430  void set_arg_stack(intx v)                     { _arg_stack = v; }
2431  void set_arg_returned(intx v)                  { _arg_returned = v; }
2432  void set_arg_modified(int a, uint v)           { ArgInfoData *aid = arg_info();
2433                                                   assert(aid != NULL, "arg_info must not be null");
2434                                                   assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2435                                                   aid->set_arg_modified(a, v); }
2436
2437  void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
2438
2439  // Location and size of data area
2440  address data_base() const {
2441    return (address) _data;
2442  }
2443  int data_size() const {
2444    return _data_size;
2445  }
2446
2447  // Accessors
2448  Method* method() const { return _method; }
2449
2450  // Get the data at an arbitrary (sort of) data index.
2451  ProfileData* data_at(int data_index) const;
2452
2453  // Walk through the data in order.
2454  ProfileData* first_data() const { return data_at(first_di()); }
2455  ProfileData* next_data(ProfileData* current) const;
2456  bool is_valid(ProfileData* current) const { return current != NULL; }
2457
2458  // Convert a dp (data pointer) to a di (data index).
2459  int dp_to_di(address dp) const {
2460    return dp - ((address)_data);
2461  }
2462
2463  // bci to di/dp conversion.
2464  address bci_to_dp(int bci);
2465  int bci_to_di(int bci) {
2466    return dp_to_di(bci_to_dp(bci));
2467  }
2468
2469  // Get the data at an arbitrary bci, or NULL if there is none.
2470  ProfileData* bci_to_data(int bci);
2471
2472  // Same, but try to create an extra_data record if one is needed:
2473  ProfileData* allocate_bci_to_data(int bci, Method* m) {
2474    ProfileData* data = NULL;
2475    // If m not NULL, try to allocate a SpeculativeTrapData entry
2476    // If m is not NULL, try to allocate a SpeculativeTrapData entry
2477      data = bci_to_data(bci);
2478    }
2479    if (data != NULL) {
2480      return data;
2481    }
2482    data = bci_to_extra_data(bci, m, true);
2483    if (data != NULL) {
2484      return data;
2485    }
2486    // If SpeculativeTrapData allocation fails try to allocate a
2487    // regular entry
2488    data = bci_to_data(bci);
2489    if (data != NULL) {
2490      return data;
2491    }
2492    return bci_to_extra_data(bci, NULL, true);
2493  }
2494
2495  // Add a handful of extra data records, for trap tracking.
2496  DataLayout* extra_data_base() const  { return limit_data_position(); }
2497  DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2498  DataLayout* args_data_limit() const  { return (DataLayout*)((address)this + size_in_bytes() -
2499                                                              parameters_size_in_bytes()); }
2500  int extra_data_size() const          { return (address)extra_data_limit() - (address)extra_data_base(); }
2501  static DataLayout* next_extra(DataLayout* dp);
2502
2503  // Return (uint)-1 for overflow.
2504  uint trap_count(int reason) const {
2505    assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
2506    return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
2507  }
2508  // For loops:
2509  static uint trap_reason_limit() { return _trap_hist_limit; }
2510  static uint trap_count_limit()  { return _trap_hist_mask; }
2511  uint inc_trap_count(int reason) {
2512    // Count another trap, anywhere in this method.
2513    assert(reason >= 0, "must be single trap");
2514    assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
2515    uint cnt1 = 1 + _trap_hist._array[reason];
2516    if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
2517      _trap_hist._array[reason] = cnt1;
2518      return cnt1;
2519    } else {
2520      return _trap_hist_mask + (++_nof_overflow_traps);
2521    }
2522  }
2523
2524  uint overflow_trap_count() const {
2525    return _nof_overflow_traps;
2526  }
2527  uint overflow_recompile_count() const {
2528    return _nof_overflow_recompiles;
2529  }
2530  void inc_overflow_recompile_count() {
2531    _nof_overflow_recompiles += 1;
2532  }
2533  uint decompile_count() const {
2534    return _nof_decompiles;
2535  }
2536  void inc_decompile_count() {
2537    _nof_decompiles += 1;
2538    if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
2539      method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
2540    }
2541  }
2542  uint tenure_traps() const {
2543    return _tenure_traps;
2544  }
2545  void inc_tenure_traps() {
2546    _tenure_traps += 1;
2547  }
2548
2549  // Return pointer to area dedicated to parameters in MDO
2550  ParametersTypeData* parameters_type_data() const {
2551    assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2552    return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
2553  }
2554
2555  int parameters_type_data_di() const {
2556    assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
2557    return _parameters_type_data_di;
2558  }
2559
2560  // Support for code generation
2561  static ByteSize data_offset() {
2562    return byte_offset_of(MethodData, _data[0]);
2563  }
2564
2565  static ByteSize trap_history_offset() {
2566    return byte_offset_of(MethodData, _trap_hist._array);
2567  }
2568
2569  static ByteSize invocation_counter_offset() {
2570    return byte_offset_of(MethodData, _invocation_counter);
2571  }
2572
2573  static ByteSize backedge_counter_offset() {
2574    return byte_offset_of(MethodData, _backedge_counter);
2575  }
2576
2577  static ByteSize invoke_mask_offset() {
2578    return byte_offset_of(MethodData, _invoke_mask);
2579  }
2580
2581  static ByteSize backedge_mask_offset() {
2582    return byte_offset_of(MethodData, _backedge_mask);
2583  }
2584
2585  static ByteSize parameters_type_data_di_offset() {
2586    return byte_offset_of(MethodData, _parameters_type_data_di);
2587  }
2588
2589  // Deallocation support - no pointer fields to deallocate
2590  void deallocate_contents(ClassLoaderData* loader_data) {}
2591
2592  // GC support
2593  void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
2594
2595  // Printing
2596  void print_on      (outputStream* st) const;
2597  void print_value_on(outputStream* st) const;
2598
2599  // printing support for method data
2600  void print_data_on(outputStream* st) const;
2601
2602  const char* internal_name() const { return "{method data}"; }
2603
2604  // verification
2605  void verify_on(outputStream* st);
2606  void verify_data_on(outputStream* st);
2607
2608  static bool profile_parameters_for_method(const methodHandle& m);
2609  static bool profile_arguments();
2610  static bool profile_arguments_jsr292_only();
2611  static bool profile_return();
2612  static bool profile_parameters();
2613  static bool profile_return_jsr292_only();
2614
2615  void clean_method_data(BoolObjectClosure* is_alive);
2616  void clean_weak_method_links();
2617  DEBUG_ONLY(void verify_clean_weak_method_links();)
2618  Mutex* extra_data_lock() { return &_extra_data_lock; }
2619};
2620
2621#endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
2622