// yieldingWorkgroup.cpp revision 8477:cb355530d9d5
/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/yieldingWorkgroup.hpp"
#include "utilities/macros.hpp"

// Forward declaration of classes declared here.

class GangWorker;
class WorkData;

34YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
35  const char* name, uint workers, bool are_GC_task_threads) :
36  FlexibleWorkGang(name, workers, are_GC_task_threads, false),
37    _yielded_workers(0) {}
38
39GangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
40  YieldingFlexibleGangWorker* new_member =
41      new YieldingFlexibleGangWorker(this, which);
42  return (YieldingFlexibleGangWorker*) new_member;
43}

// Run a task; returns when the task is done, or the workers yield,
// or the task is aborted.
// A task that has been yielded can be continued via this interface
// by using the same task repeatedly as the argument to the call.
// It is expected that the YieldingFlexibleGangTask carries the appropriate
// continuation information used by workers to continue the task
// from its last yield point. Thus, a completed task will return
// immediately with no actual work having been done by the workers.
/////////////////////
// Implementation notes: remove before checking XXX
/*
Each gang is working on a task at a certain time.
Some subset of workers may have yielded and some may
have finished their quota of work. Until this task has
been completed, the workers are bound to that task.
Once the task has been completed, the gang unbinds
itself from the task.

The yielding work gang thus exports two invocation
interfaces: run_task() and continue_task(). The
first is used to initiate a new task and bind it
to the workers; the second is used to continue an
already bound task that has yielded. Upon completion
the binding is released and a new binding may be
created.

The shape of a yielding work gang is as follows:

Overseer invokes run_task(*task).
   Lock gang monitor
   Check that there is no existing binding for the gang
   If there is one, abort with an error
   Else, create a new binding of this gang to the given task
   Set number of active workers (as asked)
   Notify workers that work is ready to be done
     [the requisite # workers would then start up
      and do the task]
   Wait on the monitor until either
     all work is completed or the task has yielded
     -- this is normally done through
        yielded + completed == active
        [completed workers are reset to idle state by overseer?]
   return appropriate status to caller

Overseer invokes continue_task(*task),
   Lock gang monitor
   Check that task is the same as current binding
   If not, abort with an error
   Else, set the number of active workers as requested?
   Notify workers that they can continue from yield points
    New workers can also start up as required
      while satisfying the constraint that
         active + yielded does not exceed required number
   Wait (as above).

NOTE: In the above, for simplicity in a first iteration
  our gangs will be of fixed population and will not
  therefore be flexible work gangs, just yielding work
  gangs. Once this works well, we will in a second
  iteration/refinement introduce flexibility into
  the work gang.

NOTE: we can always create a new gang per each iteration
  in order to get the flexibility, but we will for now
  desist from that, taking the simplified route.

 */
/////////////////////
// Bind 'new_task' to this (currently unbound) gang, size the active
// worker set, wake the workers, and block in wait_for_gang() until the
// task completes, yields, or aborts. The gang monitor is acquired here
// without a safepoint check.
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() == NULL, "Gang currently tied to a task");
  assert(new_task != NULL, "Null task");
  // Bind task to gang
  _task = new_task;
  new_task->set_gang(this);  // Establish 2-way binding to support yielding
  _sequence_number++;        // Lets sleeping workers distinguish this task
                             // activation from the previous one (see loop()).

  // Honor the task's requested size, clipped to the gang's population;
  // a request of 0 means "keep the current active-worker count".
  uint requested_size = new_task->requested_size();
  if (requested_size != 0) {
    _active_workers = MIN2(requested_size, total_workers());
  } else {
    _active_workers = active_workers();
  }
  new_task->set_actual_size(_active_workers);
  new_task->set_for_termination(_active_workers);

  // Per-task counters must have been cleared by the previous reset().
  assert(_started_workers == 0, "Tabula rasa non");
  assert(_finished_workers == 0, "Tabula rasa non");
  assert(_yielded_workers == 0, "Tabula rasa non");
  yielding_task()->set_status(ACTIVE);

  // Wake up all the workers, the first few will get to work,
  // and the rest will go back to sleep
  monitor()->notify_all();
  wait_for_gang();
}

// Block on the gang monitor (which the caller must already hold) until
// the bound task reaches COMPLETED, ABORTED, or YIELDED. On
// COMPLETED/ABORTED the gang<->task binding is released via reset();
// on YIELDED it is kept so continue_task() can resume the task later.
void YieldingFlexibleWorkGang::wait_for_gang() {

  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    // Workers notify the monitor when they drive the task into one of
    // the three terminal-for-this-round states (see loop() and yield()).
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      // All active workers have finished; no one is parked at a yield
      // point, so the binding can be dropped for the next task.
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      // Every active worker has either yielded or finished its share.
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      // Transient states cannot be observed once the wait loop above
      // has exited.
      ShouldNotReachHere();
  }
}

// Resume 'gang_task', which must be the task currently bound to this
// gang and must be in the YIELDED state. Flips the task back to ACTIVE,
// wakes the parked workers so they continue from their yield points,
// and blocks (as in start_task) until the task again completes,
// yields, or aborts.
void YieldingFlexibleWorkGang::continue_task(
  YieldingFlexibleGangTask* gang_task) {

  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() != NULL && task() == gang_task, "Incorrect usage");
  assert(_started_workers == _active_workers, "Precondition");
  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
         "Else why are we calling continue_task()");
  // Restart the yielded gang workers
  yielding_task()->set_status(ACTIVE);
  monitor()->notify_all();
  wait_for_gang();
}

192void YieldingFlexibleWorkGang::reset() {
193  _started_workers  = 0;
194  _finished_workers = 0;
195  yielding_task()->set_gang(NULL);
196  _task = NULL;    // unbind gang from task
197}
198
// Called by a worker, from inside the task's work() body, to yield the
// processor. Records the yield, moves the task to YIELDING (or YIELDED
// if this is the last running worker, notifying the overseer), then
// parks the caller on the gang monitor until the task is continued
// (ACTIVE), aborted (ABORTING), or draining to completion (COMPLETING).
// If the task is already ABORTING the yield is refused so the worker
// can unwind and abort as quickly as possible.
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that abort's cause
    // us to return at each potential yield point.
    return;
  }
  // If every active worker has now either yielded or finished, the
  // task as a whole is YIELDED and the overseer blocked in
  // wait_for_gang() must be woken; otherwise just note that the task
  // is in the process of yielding.
  if (++_yielded_workers + finished_workers() == active_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }

  // Park until the overseer changes the task status.
  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;  // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        assert(_yielded_workers > 0, "Else why am i here?");
        _yielded_workers--;  // This worker is running again.
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        // Terminal/idle states are impossible while a worker is still
        // parked at a yield point.
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}

// Request that the currently bound task abort at its earliest
// convenience: flips the task status to ABORTING so that workers
// observing the status (e.g. at a yield point) bail out, and -- if
// some workers had already yielded (YIELDING) -- wakes them so they
// notice the abort promptly.
void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  #ifndef PRODUCT
    // Aborting only makes sense while the task is still in flight.
    switch (yielding_task()->status()) {
      // allowed states
      case ACTIVE:
      case ABORTING:
      case COMPLETING:
      case YIELDING:
        break;
      // not allowed states
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      case YIELDED:
      default:
        ShouldNotReachHere();
    }
  #endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded, wake it up
    // so it can go back to waiting stations ASAP.
    monitor()->notify_all();
  }
}


///////////////////////////////
// YieldingFlexibleGangTask
///////////////////////////////
// Yield request made from within the task's work() body: delegate to
// the bound gang, which parks the calling worker
// (see YieldingFlexibleWorkGang::yield()).
void YieldingFlexibleGangTask::yield() {
  assert(gang() != NULL, "No gang to signal");
  gang()->yield();
}

// Abort request: delegate to the bound gang, which moves the task to
// ABORTING (see YieldingFlexibleWorkGang::abort()).
void YieldingFlexibleGangTask::abort() {
  assert(gang() != NULL, "No gang to signal");
  gang()->abort();
}

///////////////////////////////
// YieldingFlexibleGangWorker
///////////////////////////////
// Main loop of a yielding gang worker: park on the gang monitor until a
// new task activation is published (detected via the gang's sequence
// number), opt in as one of the active workers if there is still room,
// run the task body with the monitor released, then account for this
// worker's completion -- updating the task status and notifying the
// overseer when the whole gang has finished, aborted, or yielded.
void YieldingFlexibleGangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = gang()->monitor();
  // The gang monitor is held for the worker's whole life, except while
  // the task body runs (MutexUnlockerEx below) and while waiting.
  MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  WorkData data;
  int id;
  while (true) {
    // Check if there is work to do.
    gang()->internal_worker_poll(&data);
    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers
      if (gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers, we do not need
        // to run; fall through and wait on monitor.
      } else {
        // We need to pitch in and do the work.
        assert(gang()->started_workers() < yf_gang()->active_workers(),
               "Unexpected state");
        // Worker id is the number of workers started before us.
        id = gang()->started_workers();
        gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          data.task()->work(id);   // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded
        assert(data.task() == gang()->task(), "Confused task binding");
        if (gang()->finished_workers() == yf_gang()->active_workers()) {
          // This worker is the last to finish: move the task into its
          // terminal state and wake the overseer in wait_for_gang().
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all();  // Notify overseer
        } else { // at least one worker is still working or yielded
          assert(gang()->finished_workers() < yf_gang()->active_workers(),
                 "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not only thread to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              // If all remaining workers are parked at yield points,
              // the task as a whole has now yielded.
              if (gang()->finished_workers() + yf_gang()->yielded_workers()
                  == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all();  // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break; // nothing to do
            }
            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number so this activation is not re-run.
    previous_sequence_number = data.sequence_number();
    // Wait for more work
    gang_monitor->wait(Mutex::_no_safepoint_check_flag);
  }
}
