// stubs.cpp (revision 0:a61af66fc99e)
/*
 * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_stubs.cpp.incl"


// Implementation of StubQueue
//
// Standard wrap-around queue implementation; the queue dimensions
// are specified by the _queue_begin & _queue_end indices. The queue
// can be in two states (transparent to the outside):
//
// a) contiguous state: all queue entries in one block (or empty)
//
// Queue: |...|XXXXXXX|...............|
//        ^0  ^begin  ^end            ^size = limit
//            |_______|
//            one block
//
// b) non-contiguous state: queue entries in two blocks
//
// Queue: |XXX|.......|XXXXXXX|.......|
//        ^0  ^end    ^begin  ^limit  ^size
//        |___|       |_______|
//         1st block  2nd block
//
// In the non-contiguous state, the wrap-around point is
// indicated via the _buffer_limit index, since the last
// queue entry may not fill up the queue completely, in
// which case we need to know where the 2nd block's end
// is to do the proper wrap-around. When removing the
// last entry of the 2nd block, _buffer_limit is reset
// to _buffer_size.
//
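// Illustrative example (with assumed numbers, not taken from this code):
// for a 1024-byte buffer, entries occupying bytes [256..768) form the
// contiguous state (_queue_begin = 256, _queue_end = 768, _buffer_limit =
// _buffer_size = 1024). If a new entry does not fit into bytes [768..1024),
// the queue wraps: _buffer_limit becomes 768, _queue_end becomes 0, and the
// entry is placed at the buffer start, yielding the non-contiguous state.
//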
// CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
// ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!


StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  intptr_t size = round_to(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, size);
  if (blob == NULL) vm_exit_out_of_memory1(size, "CodeCache: no room for %s", name);
  _stub_interface  = stub_interface;
  _buffer_size     = blob->instructions_size();
  _buffer_limit    = blob->instructions_size();
  _stub_buffer     = blob->instructions_begin();
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  register_queue(this);
}
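
// Note on the constructor (illustrative arithmetic, assuming a 64-bit VM
// where BytesPerWord is 8): a requested buffer_size of 1000 bytes is first
// rounded up to a multiple of 2*BytesPerWord, i.e. round_to(1000, 16) == 1008,
// before the BufferBlob is allocated; _buffer_size is then taken from the
// blob's actual instruction area, which may be slightly larger than the
// rounded request.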


StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed, so nothing needs to be done here.
  //       If we want to implement the destructor, we need to release the BufferBlob
  //       allocated in the constructor (i.e., we need to keep it around or look it
  //       up via CodeCache::find_blob(...)).
  Unimplemented();
}


Stub* StubQueue::stub_containing(address pc) const {
  if (contains(pc)) {
    for (Stub* s = first(); s != NULL; s = next(s)) {
      if (stub_contains(s, pc)) return s;
    }
  }
  return NULL;
}


Stub* StubQueue::request_committed(int code_size) {
  Stub* s = request(code_size);
  if (s != NULL) commit(code_size);
  return s;
}
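
// Note: request_committed() is the one-step variant, used when the exact
// code size is known up front; the caller then emits the stub's code into
// the returned entry. A minimal usage sketch (the queue and size names are
// hypothetical):
//
//   Stub* s = my_queue->request_committed(my_code_size);
//   if (s != NULL) {
//     // ... generate my_code_size bytes of code into the stub ...
//   }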


Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != NULL) _mutex->lock();
  Stub* s = current_stub();
  int requested_size = round_to(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        stub_initialize(s, requested_size);
        return s;
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    s = current_stub();
    stub_initialize(s, requested_size);
    return s;
  }
  // Not enough space left
  if (_mutex != NULL) _mutex->unlock();
  return NULL;
}
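
// Note: on success, request() returns with _mutex still held; the matching
// commit() releases it. On failure the lock is released above before
// returning NULL. A worked wrap-around example with assumed numbers: for a
// 1024-byte buffer with _queue_begin == 256 and _queue_end == 992, a 64-byte
// request does not fit into bytes [992..1024), so _buffer_limit is set to
// 992 and _queue_end to 0; the stub is then allocated at the buffer start,
// since 64 bytes fit below _queue_begin.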


void StubQueue::commit(int committed_code_size) {
  assert(committed_code_size > 0, "committed_code_size must be > 0");
  int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
  Stub* s = current_stub();
  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
  stub_initialize(s, committed_size);
  _queue_end += committed_size;
  _number_of_stubs++;
  if (_mutex != NULL) _mutex->unlock();
  debug_only(stub_verify(s);)
}
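
// Typical allocation protocol (a sketch with hypothetical names, not code
// from this file): request() reserves space and keeps the queue locked, the
// caller generates code into the reserved stub, and commit() shrinks the
// reservation to the code actually emitted and releases the lock.
//
//   Stub* s = queue->request(max_code_size);    // lock held on success
//   if (s != NULL) {
//     int code_size = generate_code_into(s);    // hypothetical generator
//     queue->commit(code_size);                 // lock released
//   }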


void StubQueue::remove_first() {
  if (number_of_stubs() == 0) return;
  Stub* s = first();
  debug_only(stub_verify(s);)
  stub_finalize(s);
  _queue_begin += stub_size(s);
  assert(_queue_begin <= _buffer_limit, "sanity check");
  if (_queue_begin == _queue_end) {
    // buffer empty
    // => reset queue indices
    _queue_begin  = 0;
    _queue_end    = 0;
    _buffer_limit = _buffer_size;
  } else if (_queue_begin == _buffer_limit) {
    // buffer limit reached
    // => reset buffer limit & wrap around
    _buffer_limit = _buffer_size;
    _queue_begin = 0;
  }
  _number_of_stubs--;
}
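
// Note: continuing the wrap-around example above (assumed numbers), once
// _queue_begin has advanced to the old _buffer_limit (992), the second
// branch fires: _buffer_limit is restored to _buffer_size (1024) and
// _queue_begin wraps to 0, returning the queue to the contiguous state.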


void StubQueue::remove_first(int n) {
  int i = MIN2(n, number_of_stubs());
  while (i-- > 0) remove_first();
}


void StubQueue::remove_all() {
  debug_only(verify();)
  remove_first(number_of_stubs());
  assert(number_of_stubs() == 0, "sanity check");
}


enum { StubQueueLimit = 10 };  // there are only a few in the world
static StubQueue* registered_stub_queues[StubQueueLimit];

void StubQueue::register_queue(StubQueue* sq) {
  for (int i = 0; i < StubQueueLimit; i++) {
    if (registered_stub_queues[i] == NULL) {
      registered_stub_queues[i] = sq;
      return;
    }
  }
  ShouldNotReachHere();
}


void StubQueue::queues_do(void f(StubQueue* sq)) {
  for (int i = 0; i < StubQueueLimit; i++) {
    if (registered_stub_queues[i] != NULL) {
      f(registered_stub_queues[i]);
    }
  }
}


void StubQueue::stubs_do(void f(Stub* s)) {
  debug_only(verify();)
  MutexLockerEx lock(_mutex);
  for (Stub* s = first(); s != NULL; s = next(s)) f(s);
}
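
// Iteration sketch (hypothetical callbacks, not part of this file):
// queues_do() visits every registered StubQueue, and stubs_do() visits the
// stubs of one queue while holding its lock.
//
//   static void count_stub(Stub* s)        { /* e.g., tally stub sizes */ }
//   static void visit_queue(StubQueue* sq) { sq->stubs_do(count_stub); }
//   ...
//   StubQueue::queues_do(visit_queue);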


void StubQueue::verify() {
  // verify only if initialized
  if (_stub_buffer == NULL) return;
  MutexLockerEx lock(_mutex);
  // verify index boundaries
  guarantee(0 <= _buffer_size, "buffer size must be positive");
  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size, "_buffer_limit out of bounds");
  guarantee(0 <= _queue_begin  && _queue_begin  <  _buffer_limit, "_queue_begin out of bounds");
  guarantee(0 <= _queue_end    && _queue_end    <= _buffer_limit, "_queue_end   out of bounds");
  // verify alignment
  guarantee(_buffer_size  % CodeEntryAlignment == 0, "_buffer_size  not aligned");
  guarantee(_buffer_limit % CodeEntryAlignment == 0, "_buffer_limit not aligned");
  guarantee(_queue_begin  % CodeEntryAlignment == 0, "_queue_begin  not aligned");
  guarantee(_queue_end    % CodeEntryAlignment == 0, "_queue_end    not aligned");
  // verify buffer limit/size relationship
  if (is_contiguous()) {
    guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
  }
  // verify contents
  int n = 0;
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_verify(s);
    n++;
  }
  guarantee(n == number_of_stubs(), "number of stubs inconsistent");
  guarantee(_queue_begin != _queue_end || n == 0, "_queue_begin and _queue_end must differ unless the queue is empty");
}


void StubQueue::print() {
  MutexLockerEx lock(_mutex);
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_print(s);
  }
}