codeBuffer.hpp revision 7890:f83851ae258e
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_ASM_CODEBUFFER_HPP
#define SHARE_VM_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "utilities/debug.hpp"

class CodeStrings;
class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;

class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
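
  // Example usage (a sketch; 'verified_off' is a hypothetical offset produced
  // during code generation):
  //   CodeOffsets offsets;
  //   offsets.set_value(CodeOffsets::Verified_Entry, verified_off);
  //   if (offsets.value(CodeOffsets::Frame_Complete) == CodeOffsets::frame_never_safe) {
  //     // forte stack walking must treat this code as suspect
  //   }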
};

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection VALUE_OBJ_CLASS_SPEC {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _frozen;          // no more expansion of this section
  char        _index;           // my section number (SECT_INSTS, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note:  _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = NULL;
    _mark          = NULL;
    _end           = NULL;
    _limit         = NULL;
    _locs_start    = NULL;
    _locs_end      = NULL;
    _locs_limit    = NULL;
    _locs_point    = NULL;
    _locs_own      = false;
    _frozen        = false;
    debug_only(_index = (char)-1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start         = start;
    _mark          = NULL;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != NULL, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining()const { return (csize_t)(_locs_limit - _locs_end); }

  int         index() const         { return _index; }
  bool        is_allocated() const  { return _start != NULL; }
  bool        is_empty() const      { return _start == _end; }
  bool        is_frozen() const     { return _frozen; }
  bool        has_locs() const      { return _locs_end != NULL; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void    set_end(address pc)       { assert(allocates2(pc), err_msg("not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit))); _end = pc; }
  void    set_mark(address pc)      { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void    set_mark_off(int offset)  { assert(contains2(offset + _start), "not in codeBuffer");
                                      _mark = offset + _start; }
  void    set_mark()                { _mark = _end; }
  void    clear_mark()              { _mark = NULL; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Code emission
  void emit_int8 ( int8_t  x)  { *((int8_t*)  end()) = x; set_end(end() + sizeof(int8_t)); }
  void emit_int16( int16_t x)  { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
  void emit_int32( int32_t x)  { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
  void emit_int64( int64_t x)  { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }

  void emit_float( jfloat  x)  { *((jfloat*)  end()) = x; set_end(end() + sizeof(jfloat)); }
  void emit_double(jdouble x)  { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
  void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }
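
  // Example (a sketch of typical use): raw data is written at end() and
  // set_end() asserts that the write stays within the allocated limit.
  //   CodeSection* consts = cb->consts();   // 'cb' is an enclosing CodeBuffer*
  //   consts->emit_double(3.14);            // 8 bytes of non-instruction data
  //   consts->emit_address(target_pc);      // 'target_pc' is a hypothetical address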

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    if (rtype != relocInfo::none)
      relocate(at, Relocation::spec_simple(rtype), format);
  }

  // alignment requirement for starting offset
  // Requirements are that the instruction area and the
  // stubs area must start on CodeEntryAlignment, and
  // the ctable on sizeof(jdouble)
  int alignment() const             { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();         //  { _outer->freeze_section(this); }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void dump();
  void print(const char* name);
#endif //PRODUCT
};

class CodeString;
class CodeStrings VALUE_OBJ_CLASS_SPEC {
private:
#ifndef PRODUCT
  CodeString* _strings;
#ifdef ASSERT
  // Becomes true after copy-out, forbids further use.
  bool _defunct; // Zero bit pattern is "valid", see memset call in decode_env::decode_env
#endif
#endif

  CodeString* find(intptr_t offset) const;
  CodeString* find_last(intptr_t offset) const;

  void set_null_and_invalidate() {
#ifndef PRODUCT
    _strings = NULL;
#ifdef ASSERT
    _defunct = true;
#endif
#endif
  }

public:
  CodeStrings() {
#ifndef PRODUCT
    _strings = NULL;
#ifdef ASSERT
    _defunct = false;
#endif
#endif
  }

  bool is_null() {
#ifdef ASSERT
    return _strings == NULL;
#else
    return true;
#endif
  }

  const char* add_string(const char * string) PRODUCT_RETURN_(return NULL;);

  void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
  // MOVE strings from other to this; invalidate other.
  void assign(CodeStrings& other)  PRODUCT_RETURN;
  // COPY strings from other to this; leave other valid.
  void copy(CodeStrings& other)  PRODUCT_RETURN;
  void free() PRODUCT_RETURN;
  // Guarantee that _strings are used at most once; assign invalidates a buffer.
  inline void check_valid() const {
#ifdef ASSERT
    assert(!_defunct, "Use of invalid CodeStrings");
#endif
  }
};

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
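//
// Example (a sketch of variant (1), assuming a MacroAssembler is used as the
// code emitter; 'buf' and 'buf_size' are a hypothetical pre-allocated span):
//   CodeBuffer cb(buf, buf_size);   // no relocation info will be kept
//   MacroAssembler masm(&cb);
//   // ... emit instructions via masm; they accumulate in cb.insts() ...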

class CodeBuffer: public StackObj {
  friend class CodeSection;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;
  CodeStrings  _code_strings;
  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _decode_begin;   // start address for decode
  address      decode_begin();

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != NULL, "must have a name");
    _name            = name;
    _before_expand   = NULL;
    _blob            = NULL;
    _oop_recorder    = NULL;
    _decode_begin    = NULL;
    _overflow_arena  = NULL;
  }

  void initialize(address code_start, csize_t code_size) {
    _consts.initialize_outer(this,  SECT_CONSTS);
    _insts.initialize_outer(this,   SECT_INSTS);
    _stubs.initialize_outer(this,   SECT_STUBS);
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  void freeze_section(CodeSection* cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size) {
    assert(code_start != NULL, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    verify_section_allocation();
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name) {
    initialize_misc(name);
  }


  // (4) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
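  // Example (a sketch; 'code_size' and 'locs_size' are hypothetical estimates):
  //   CodeBuffer cb("my_stub");              // constructor 3: name only
  //   cb.initialize(code_size, locs_size);   // now equivalent to constructor 4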
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts()            { return &_consts; }
  CodeSection* insts()             { return &_insts; }
  CodeSection* stubs()             { return &_stubs; }

  // present sections in order; return NULL at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const;
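  // Worked example: with sect_bits == 2 and SECT_INSTS == 1, an offset of 0x40
  // into the insts section packs as locator(0x40, SECT_INSTS) == (0x40 << 2) | 1
  // == 0x101; locator_pos(0x101) == 0x40 and locator_sect(0x101) == SECT_INSTS
  // recover the two halves.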

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const                  { return _name; }
  CodeBuffer* before_expand() const         { return _before_expand; }
  BufferBlob* blob() const                  { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();                       // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const      { return _insts.start();      }
  address       insts_end() const        { return _insts.end();        }
  void      set_insts_end(address end)   {        _insts.set_end(end); }
  address       insts_limit() const      { return _insts.limit();      }
  address       insts_mark() const       { return _insts.mark();       }
  void      set_insts_mark()             {        _insts.set_mark();   }
  void    clear_insts_mark()             {        _insts.clear_mark(); }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const                { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const             { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const        { assert(is_pure(), "no non-code");
                                           return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const         { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const        { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(methodHandle method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size)            { initialize_section_size(&_consts,  size); }
  void initialize_stubs_size(csize_t size)             { initialize_section_size(&_stubs,   size); }
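  // Example (a sketch; the sizes are hypothetical estimates).  Reverse section
  // order means stubs before consts, since both are carved out of insts:
  //   cb.initialize_stubs_size(stubs_size);
  //   cb.initialize_consts_size(consts_size);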
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const   { return _oop_recorder; }
  CodeStrings& strings()              { return _code_strings; }

  void free_strings() {
    if (!_code_strings.is_null()) {
      _code_strings.free(); // sets _strings to NULL as a side-effect.
    }
  }

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }
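
  // Example (a sketch; 'call_pc' is a hypothetical address of an instruction
  // just emitted into the insts section):
  //   cb.relocate(call_pc, relocInfo::runtime_call_type);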

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to a specified code buffer
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    decode_all();         // decodes all the code
  void    skip_decode();        // sets decode_begin to code_end();
  void    print();
#endif


  // The following header contains architecture-specific implementations
#ifdef TARGET_ARCH_x86
# include "codeBuffer_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "codeBuffer_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "codeBuffer_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "codeBuffer_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "codeBuffer_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "codeBuffer_aarch64.hpp"
#endif

};


inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
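
// Example (a sketch): reserving space before a multi-word emit.  A 'true'
// return means the underlying memory was reallocated, so any raw addresses
// cached from before the call must be re-read from the section:
//   if (cs->maybe_expand_to_ensure_remaining(2 * sizeof(int64_t))) {
//     // code moved; refresh cached pointers such as cs->end()
//   }
//   cs->emit_int64(lo);   // 'lo' and 'hi' are hypothetical payload words
//   cs->emit_int64(hi);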

#endif // SHARE_VM_ASM_CODEBUFFER_HPP