codeBuffer.cpp revision 1668:3e8fbc61cee8
1/*
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25# include "incls/_precompiled.incl"
26# include "incls/_codeBuffer.cpp.incl"
27
28// The structure of a CodeSection:
29//
30//    _start ->           +----------------+
31//                        | machine code...|
32//    _end ->             |----------------|
33//                        |                |
34//                        |    (empty)     |
35//                        |                |
36//                        |                |
37//                        +----------------+
38//    _limit ->           |                |
39//
40//    _locs_start ->      +----------------+
41//                        |reloc records...|
42//                        |----------------|
43//    _locs_end ->        |                |
44//                        |                |
45//                        |    (empty)     |
46//                        |                |
47//                        |                |
48//                        +----------------+
49//    _locs_limit ->      |                |
50// The _end (resp. _limit) pointer refers to the first
51// unused (resp. unallocated) byte.
52
53// The structure of the CodeBuffer while code is being accumulated:
54//
55//    _total_start ->    \
56//    _insts._start ->              +----------------+
57//                                  |                |
58//                                  |     Code       |
59//                                  |                |
60//    _stubs._start ->              |----------------|
61//                                  |                |
62//                                  |    Stubs       | (also handlers for deopt/exception)
63//                                  |                |
64//    _consts._start ->             |----------------|
65//                                  |                |
66//                                  |   Constants    |
67//                                  |                |
68//                                  +----------------+
69//    + _total_size ->              |                |
70//
71// When the code and relocations are copied to the code cache,
72// the empty parts of each section are removed, and everything
73// is copied into contiguous locations.
74
75typedef CodeBuffer::csize_t csize_t;  // file-local definition
76
// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  // Wraps the blob's existing content area directly; no new storage is
  // allocated and the start address is used as-is (no realignment).
  initialize_misc("static buffer");
  initialize(blob->content_begin(), blob->content_size());
  assert(verify_section_allocation(), "initial use of buffer OK");
}
84
85void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
86  // Compute maximal alignment.
87  int align = _insts.alignment();
88  // Always allow for empty slop around each section.
89  int slop = (int) CodeSection::end_slop();
90
91  assert(blob() == NULL, "only once");
92  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
93  if (blob() == NULL) {
94    // The assembler constructor will throw a fatal on an empty CodeBuffer.
95    return;  // caller must test this
96  }
97
98  // Set up various pointers into the blob.
99  initialize(_total_start, _total_size);
100
101  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");
102
103  pd_initialize();
104
105  if (locs_size != 0) {
106    _insts.initialize_locs(locs_size / sizeof(relocInfo));
107  }
108
109  assert(verify_section_allocation(), "initial use of blob is OK");
110}
111
112
CodeBuffer::~CodeBuffer() {
  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  assert(verify_section_allocation(), "final storage configuration still OK");
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  // Poison the whole object so any stale use after destruction is caught...
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  // ...then restore the allocation-type bits that ~ResourceObj() still reads.
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}
137
// Replace the built-in (still unused) oop recorder with an external one.
// May be called at most once, and only before any oops are recorded.
void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.oop_size());  // force unused OR to be frozen
  _oop_recorder = r;
}
143
144void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
145  assert(cs != &_insts, "insts is the memory provider, not the consumer");
146#ifdef ASSERT
147  for (int n = (int)SECT_INSTS+1; n < (int)SECT_LIMIT; n++) {
148    CodeSection* prevCS = code_section(n);
149    if (prevCS == cs)  break;
150    assert(!prevCS->is_allocated(), "section allocation must be in reverse order");
151  }
152#endif
153  csize_t slop = CodeSection::end_slop();  // margin between sections
154  int align = cs->alignment();
155  assert(is_power_of_2(align), "sanity");
156  address start  = _insts._start;
157  address limit  = _insts._limit;
158  address middle = limit - size;
159  middle -= (intptr_t)middle & (align-1);  // align the division point downward
160  guarantee(middle - slop > start, "need enough space to divide up");
161  _insts._limit = middle - slop;  // subtract desired space, plus slop
162  cs->initialize(middle, limit - middle);
163  assert(cs->start() == middle, "sanity");
164  assert(cs->limit() == limit,  "sanity");
165  // give it some relocations to start with, if the main section has them
166  if (_insts.has_locs())  cs->initialize_locs(1);
167}
168
169void CodeBuffer::freeze_section(CodeSection* cs) {
170  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
171  csize_t frozen_size = cs->size();
172  if (next_cs != NULL) {
173    frozen_size = next_cs->align_at_start(frozen_size);
174  }
175  address old_limit = cs->limit();
176  address new_limit = cs->start() + frozen_size;
177  relocInfo* old_locs_limit = cs->locs_limit();
178  relocInfo* new_locs_limit = cs->locs_end();
179  // Patch the limits.
180  cs->_limit = new_limit;
181  cs->_locs_limit = new_locs_limit;
182  cs->_frozen = true;
183  if (!next_cs->is_allocated() && !next_cs->is_frozen()) {
184    // Give remaining buffer space to the following section.
185    next_cs->initialize(new_limit, old_limit - new_limit);
186    next_cs->initialize_shared_locs(new_locs_limit,
187                                    old_locs_limit - new_locs_limit);
188  }
189}
190
191void CodeBuffer::set_blob(BufferBlob* blob) {
192  _blob = blob;
193  if (blob != NULL) {
194    address start = blob->content_begin();
195    address end   = blob->content_end();
196    // Round up the starting address.
197    int align = _insts.alignment();
198    start += (-(intptr_t)start) & (align-1);
199    _total_start = start;
200    _total_size  = end - start;
201  } else {
202    #ifdef ASSERT
203    // Clean out dangling pointers.
204    _total_start    = badAddress;
205    _insts._start   = _insts._end   = badAddress;
206    _stubs._start   = _stubs._end   = badAddress;
207    _consts._start  = _consts._end  = badAddress;
208    #endif //ASSERT
209  }
210}
211
212void CodeBuffer::free_blob() {
213  if (_blob != NULL) {
214    BufferBlob::free(_blob);
215    set_blob(NULL);
216  }
217}
218
219const char* CodeBuffer::code_section_name(int n) {
220#ifdef PRODUCT
221  return NULL;
222#else //PRODUCT
223  switch (n) {
224  case SECT_INSTS:             return "insts";
225  case SECT_STUBS:             return "stubs";
226  case SECT_CONSTS:            return "consts";
227  default:                     return NULL;
228  }
229#endif //PRODUCT
230}
231
232int CodeBuffer::section_index_of(address addr) const {
233  for (int n = 0; n < (int)SECT_LIMIT; n++) {
234    const CodeSection* cs = code_section(n);
235    if (cs->allocates(addr))  return n;
236  }
237  return SECT_NONE;
238}
239
240int CodeBuffer::locator(address addr) const {
241  for (int n = 0; n < (int)SECT_LIMIT; n++) {
242    const CodeSection* cs = code_section(n);
243    if (cs->allocates(addr)) {
244      return locator(addr - cs->start(), n);
245    }
246  }
247  return -1;
248}
249
250address CodeBuffer::locator_address(int locator) const {
251  if (locator < 0)  return NULL;
252  address start = code_section(locator_sect(locator))->start();
253  return start + locator_pos(locator);
254}
255
256address CodeBuffer::decode_begin() {
257  address begin = _insts.start();
258  if (_decode_begin != NULL && _decode_begin > begin)
259    begin = _decode_begin;
260  return begin;
261}
262
263
264GrowableArray<int>* CodeBuffer::create_patch_overflow() {
265  if (_overflow_arena == NULL) {
266    _overflow_arena = new Arena();
267  }
268  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
269}
270
271
272// Helper function for managing labels and their target addresses.
273// Returns a sensible address, and if it is not the label's final
274// address, notes the dependency (at 'branch_pc') on the label.
275address CodeSection::target(Label& L, address branch_pc) {
276  if (L.is_bound()) {
277    int loc = L.loc();
278    if (index() == CodeBuffer::locator_sect(loc)) {
279      return start() + CodeBuffer::locator_pos(loc);
280    } else {
281      return outer()->locator_address(loc);
282    }
283  } else {
284    assert(allocates2(branch_pc), "sanity");
285    address base = start();
286    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
287    L.add_patch_at(outer(), patch_loc);
288
289    // Need to return a pc, doesn't matter what it is since it will be
290    // replaced during resolution later.
291    // Don't return NULL or badAddress, since branches shouldn't overflow.
292    // Don't return base either because that could overflow displacements
293    // for shorter branches.  It will get checked when bound.
294    return branch_pc;
295  }
296}
297
// Record relocation info of the given type for address `at` within this
// section, expanding the relocation buffer and emitting filler records
// as needed to encode large address gaps.
void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none              ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    // Large offsets require extra filler records; budget for them too.
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
357
358void CodeSection::initialize_locs(int locs_capacity) {
359  assert(_locs_start == NULL, "only one locs init step, please");
360  // Apply a priori lower limits to relocation size:
361  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
362  if (locs_capacity < min_locs)  locs_capacity = min_locs;
363  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
364  _locs_start    = locs_start;
365  _locs_end      = locs_start;
366  _locs_limit    = locs_start + locs_capacity;
367  _locs_own      = true;
368}
369
370void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
371  assert(_locs_start == NULL, "do this before locs are allocated");
372  // Internal invariant:  locs buf must be fully aligned.
373  // See copy_relocations_to() below.
374  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
375    ++buf; --length;
376  }
377  if (length > 0) {
378    _locs_start = buf;
379    _locs_end   = buf;
380    _locs_limit = buf + length;
381    _locs_own   = false;
382  }
383}
384
// Adopt (share) the relocation records of `source_cs` wholesale, and set
// this section's locs_point to the corresponding place in the new code.
void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    // Mark the shared buffer as exactly full: no room to append here.
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}
395
396void CodeSection::expand_locs(int new_capacity) {
397  if (_locs_start == NULL) {
398    initialize_locs(new_capacity);
399    return;
400  } else {
401    int old_count    = locs_count();
402    int old_capacity = locs_capacity();
403    if (new_capacity < old_capacity * 2)
404      new_capacity = old_capacity * 2;
405    relocInfo* locs_start;
406    if (_locs_own) {
407      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
408    } else {
409      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
410      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
411      _locs_own = true;
412    }
413    _locs_start    = locs_start;
414    _locs_end      = locs_start + old_count;
415    _locs_limit    = locs_start + new_capacity;
416  }
417}
418
419
420/// Support for emitting the code to its final location.
421/// The pattern is the same for all functions.
422/// We iterate over all the sections, padding each to alignment.
423
424csize_t CodeBuffer::total_content_size() const {
425  csize_t size_so_far = 0;
426  for (int n = 0; n < (int)SECT_LIMIT; n++) {
427    const CodeSection* cs = code_section(n);
428    if (cs->is_empty())  continue;  // skip trivial section
429    size_so_far = cs->align_at_start(size_so_far);
430    size_so_far += cs->size();
431  }
432  return size_so_far;
433}
434
// Plan the compacted layout of this buffer inside `dest`: give each
// destination section a start and size with inter-section slack removed,
// padding only as alignment requires.
void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    address cstart = cs->start();
    address cend   = cs->end();
    csize_t csize  = cend - cstart;

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        // Alignment padding bytes are accounted to the preceding section.
        prev_dest_cs->_limit += padding;
      }
      #ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < SECT_CONSTS) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
      #endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  assert(dest->verify_section_allocation(), "final configuration works");
}
494
// Map `addr` (which must lie inside some section) to its byte offset in
// the final compacted layout, accounting for inter-section alignment.
// Fails (ShouldNotReachHere) if no section contains the address.
csize_t CodeBuffer::total_offset_of(address addr) const {
  csize_t code_size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (!cs->is_empty()) {
      // Skip the alignment padding that will precede this section.
      code_size_so_far = cs->align_at_start(code_size_so_far);
    }
    if (cs->contains2(addr)) {
      return code_size_so_far + (addr - cs->start());
    }
    code_size_so_far += cs->size();
  }
#ifndef PRODUCT
  tty->print_cr("Dangling address " PTR_FORMAT " in:", addr);
  ((CodeBuffer*)this)->print();
#endif
  ShouldNotReachHere();
  return -1;
}
514
515csize_t CodeBuffer::total_relocation_size() const {
516  csize_t lsize = copy_relocations_to(NULL);  // dry run only
517  csize_t csize = total_content_size();
518  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
519  return (csize_t) align_size_up(total, HeapWordSize);
520}
521
// Concatenate each section's relocation records into the destination
// blob's relocation area, inserting filler records to span the gaps
// between sections.  With dest == NULL this is a pure sizing pass.
// Returns the number of bytes of relocation data produced.
csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;
  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
    assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
    assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");
  }
  // if dest == NULL, this is just the sizing pass

  csize_t code_end_so_far = 0;    // offset past the sections walked so far
  csize_t code_point_so_far = 0;  // address point the reloc stream has reached
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point; // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_content_size(), "sanity");

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}
614
// Copy the accumulated code into its final resting place inside
// `dest_blob`, compacting out inter-section slack, fixing relocations,
// transferring comments, and flushing the instruction cache.
void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  // Wrap the destination blob in a temporary CodeBuffer for layout.
  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer comments from buffer to blob
  dest_blob->set_comments(_comments);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}
637
638// Move all my code into another code buffer.
639// Consult applicable relocs to repair embedded addresses.
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  // Copy every non-empty section's bytes into the corresponding (already
  // laid out) destination section, then repair pc-relative addresses.
  DEBUG_ONLY(address dest_end = dest->_total_start + dest->_total_size);
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }

    // badAddress here means relocate() was once called without locs storage.
    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);

    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }
}
678
// Compute per-section capacities for an expanded buffer: `which_cs` must
// grow by at least `amount`; other sections grow heuristically.  Fills
// new_capacity[0..SECT_LIMIT) and returns the total capacity needed.
csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  int prev_n = -1;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(prev_n >= 0, "sanity");
        new_capacity[prev_n] += padding;
      }
      prev_n = n;
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;       // minimum initial increase
    if (sect == which_cs) {
      // The requesting section must get at least `amount` more room.
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
726
// Grow this buffer so that `which_cs` gains at least `amount` bytes:
// allocate a bigger blob, move all code and relocations into it, and keep
// the old (undersized) incarnation alive on the _before_expand chain so
// previously constructed internal addresses can still be translated.
// On allocation failure the blob is freed; callers must check blob().
void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  // Walk in reverse: non-insts sections are carved off the insts section.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_INSTS; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n > SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  assert(verify_section_allocation(), "expanded allocation is ship-shape");

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}
818
819void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
820  // Must already have disposed of the old blob somehow.
821  assert(blob() == NULL, "must be empty");
822#ifdef ASSERT
823
824#endif
825  // Take the new blob away from cb.
826  set_blob(cb->blob());
827  // Take over all the section pointers.
828  for (int n = 0; n < (int)SECT_LIMIT; n++) {
829    CodeSection* cb_sect   = cb->code_section(n);
830    CodeSection* this_sect = code_section(n);
831    this_sect->take_over_code_from(cb_sect);
832  }
833  _overflow_arena = cb->_overflow_arena;
834  // Make sure the old cb won't try to use it or free it.
835  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
836}
837
838#ifdef ASSERT
// Debug-only invariant check: every allocated section must lie inside
// [_total_start, _total_start + _total_size), be contained in the blob
// (when one is attached), appear in ascending address order, and start
// on its required alignment.  Always returns true; failures fire asserts.
bool CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return true;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    // The buffer's total span must fit inside the blob's content area.
    assert(tstart >= _blob->content_begin(), "sanity");
    assert(tend   <= _blob->content_end(),   "sanity");
  }
  address tcheck = tstart;  // advancing pointer to verify disjointness
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated())  continue;  // skip sections never given storage
    assert(sect->start() >= tcheck, "sanity");
    // NOTE(review): tcheck advances only to each section's start, not its
    // end, so this verifies ascending start order rather than full
    // non-overlap of [start, end) ranges — confirm this is intentional.
    tcheck = sect->start();
    // Alignment is only enforceable once a real blob backs the buffer and
    // the section actually holds data.
    assert((intptr_t)tcheck % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    assert(sect->end()   >= tcheck, "sanity");
    assert(sect->end()   <= tend,   "sanity");
  }
  return true;
}
861#endif //ASSERT
862
863#ifndef PRODUCT
864
865void CodeSection::dump() {
866  address ptr = start();
867  for (csize_t step; ptr < end(); ptr += step) {
868    step = end() - ptr;
869    if (step > jintSize * 4)  step = jintSize * 4;
870    tty->print(PTR_FORMAT ": ", ptr);
871    while (step > 0) {
872      tty->print(" " PTR32_FORMAT, *(jint*)ptr);
873      ptr += jintSize;
874    }
875    tty->cr();
876  }
877}
878
879
// Disassemble this section's entire [start, end) range to the tty.
void CodeSection::decode() {
  Disassembler::decode(start(), end());
}
883
884
// Attach a block comment at the given code offset; the text is copied
// (see CodeComment's constructor), so the caller keeps ownership of its
// own buffer.
void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _comments.add_comment(offset, comment);
}
888
889
890class CodeComment: public CHeapObj {
891 private:
892  friend class CodeComments;
893  intptr_t     _offset;
894  const char * _comment;
895  CodeComment* _next;
896
897  ~CodeComment() {
898    assert(_next == NULL, "wrong interface for freeing list");
899    os::free((void*)_comment);
900  }
901
902 public:
903  CodeComment(intptr_t offset, const char * comment) {
904    _offset = offset;
905    _comment = os::strdup(comment);
906    _next = NULL;
907  }
908
909  intptr_t     offset()  const { return _offset;  }
910  const char * comment() const { return _comment; }
911  CodeComment* next()          { return _next; }
912
913  void set_next(CodeComment* next) { _next = next; }
914
915  CodeComment* find(intptr_t offset) {
916    CodeComment* a = this;
917    while (a != NULL && a->_offset != offset) {
918      a = a->_next;
919    }
920    return a;
921  }
922};
923
924
925void CodeComments::add_comment(intptr_t offset, const char * comment) {
926  CodeComment* c = new CodeComment(offset, comment);
927  CodeComment* insert = NULL;
928  if (_comments != NULL) {
929    CodeComment* c = _comments->find(offset);
930    insert = c;
931    while (c && c->offset() == offset) {
932      insert = c;
933      c = c->next();
934    }
935  }
936  if (insert) {
937    // insert after comments with same offset
938    c->set_next(insert->next());
939    insert->set_next(c);
940  } else {
941    c->set_next(_comments);
942    _comments = c;
943  }
944}
945
946
// Shallow takeover of another list's head pointer.  The receiver must be
// empty; afterwards both objects reference the same nodes, so exactly one
// of them should eventually call free() — presumably the receiver owns
// them from here on (callers should verify; nothing here enforces it).
void CodeComments::assign(CodeComments& other) {
  assert(_comments == NULL, "don't overwrite old value");
  _comments = other._comments;
}
951
952
953void CodeComments::print_block_comment(outputStream* stream, intptr_t offset) {
954  if (_comments != NULL) {
955    CodeComment* c = _comments->find(offset);
956    while (c && c->offset() == offset) {
957      stream->bol();
958      stream->print("  ;; ");
959      stream->print_cr(c->comment());
960      c = c->next();
961    }
962  }
963}
964
965
966void CodeComments::free() {
967  CodeComment* n = _comments;
968  while (n) {
969    // unlink the node from the list saving a pointer to the next
970    CodeComment* p = n->_next;
971    n->_next = NULL;
972    delete n;
973    n = p;
974  }
975  _comments = NULL;
976}
977
978
979
// Disassemble only the instructions emitted since the last decode (or
// skip_decode), then advance the decode watermark to the current end.
void CodeBuffer::decode() {
  Disassembler::decode(decode_begin(), insts_end());
  _decode_begin = insts_end();
}
984
985
// Advance the decode watermark without printing, so the instructions
// emitted so far are excluded from the next decode() call.
void CodeBuffer::skip_decode() {
  _decode_begin = insts_end();
}
989
990
991void CodeBuffer::decode_all() {
992  for (int n = 0; n < (int)SECT_LIMIT; n++) {
993    // dump contents of each section
994    CodeSection* cs = code_section(n);
995    tty->print_cr("! %s:", code_section_name(n));
996    if (cs != consts())
997      cs->decode();
998    else
999      cs->dump();
1000  }
1001}
1002
1003
// Print a two-line summary of this section — the code range
// (start : end : limit, plus used/capacity) and the relocation-record
// range — optionally followed by the individual relocations.
void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, start(), end(), limit(), size(), capacity(),
                is_frozen()? " [frozen]": "");
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, locs_start(), locs_end(), locs_limit(), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations) {
    // Walk and print each relocation record in this section.
    RelocIterator iter(this);
    iter.print();
  }
}
1016
// Print a summary of every section in this buffer.
void CodeBuffer::print() {
  // NOTE(review): comparing `this` with NULL is undefined behavior in
  // standard C++ and modern compilers may delete this guard; it exists
  // as a debugger convenience so `print()` on a null pointer is benign.
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}
1030
1031#endif // PRODUCT
1032