/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file handles the ORDERED construct.  */

#include "libgomp.h"
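
/* For orientation, a minimal user-level example of the construct this
   file implements (an editorial sketch; compute and emit are hypothetical
   functions):

       #pragma omp parallel for ordered schedule(dynamic)
       for (i = 0; i < n; i++)
	 {
	   compute (i);
       #pragma omp ordered
	   emit (i);
	 }

   The calls to compute run in parallel and may complete out of order;
   each call to emit is delayed until every prior iteration has executed
   its ORDERED region, so output appears in original iteration order.  */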


/* This function is called when first allocating an iteration block.  That
   is, the thread is not currently on the queue.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_first (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

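  /* ordered_team_ids is a circular queue of team->nthreads slots;
     ordered_cur indexes the current head and ordered_num_used counts
     the occupied entries.  Claim the first free slot, wrapping the
     index modulo nthreads.  */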
  index = ws->ordered_cur + ws->ordered_num_used;
  if (index >= team->nthreads)
    index -= team->nthreads;
  ws->ordered_team_ids[index] = thr->ts.team_id;

  /* If this is the first and only thread in the queue, then there is
     no one to release us when we get to our ordered section.  Post to
     our own release semaphore now so that we won't block later.  */
  if (ws->ordered_num_used++ == 0)
    gomp_sem_post (team->ordered_release[thr->ts.team_id]);
}

/* This function is called when completing the last iteration block.  That
   is, there are no more iterations to perform and so the thread should be
   removed from the queue entirely.  Because of the way ORDERED blocks are
   managed, it follows that we currently own access to the ORDERED block,
   and should now pass it on to the next thread.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_last (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If we're not the last thread in the queue, then wake the next.  */
  if (--ws->ordered_num_used > 0)
    {
      unsigned next = ws->ordered_cur + 1;
      if (next == team->nthreads)
	next = 0;
      ws->ordered_cur = next;

      next_id = ws->ordered_team_ids[next];
      gomp_sem_post (team->ordered_release[next_id]);
    }
}

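/* An illustrative trace (editorial, not from the original sources):
   with four threads, ordered_team_ids = {2, 0, 3, 1}, ordered_cur = 0
   and ordered_num_used = 4, thread 2 owns the ordered section.  When
   thread 2 completes its last iteration block, gomp_ordered_last
   advances ordered_cur to 1, drops ordered_num_used to 3, and posts
   thread 0's release semaphore, making thread 0 the new owner.  */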

/* This function is called when allocating a subsequent iteration block.
   That is, we're done with the current iteration block and we're allocating
   another.  This is the logical combination of a call to gomp_ordered_last
   followed by a call to gomp_ordered_first.  The work-share lock must be
   held on entry.  */

void
gomp_ordered_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index, next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If there's only one thread in the queue, that must be us.  */
  if (ws->ordered_num_used == 1)
    {
      /* As in gomp_ordered_first, there is no one else to release us,
	 so post to our own release semaphore.  */
      gomp_sem_post (team->ordered_release[thr->ts.team_id]);
      return;
    }

  /* If the queue is entirely full, then we move ourselves to the end
     of the queue merely by incrementing ordered_cur: the slot just past
     the last used entry wraps around to ordered_cur itself, which
     already holds our id.  Only if the queue is not full do we have to
     write our id.  */
  if (ws->ordered_num_used < team->nthreads)
    {
      index = ws->ordered_cur + ws->ordered_num_used;
      if (index >= team->nthreads)
	index -= team->nthreads;
      ws->ordered_team_ids[index] = thr->ts.team_id;
    }

  index = ws->ordered_cur + 1;
  if (index == team->nthreads)
    index = 0;
  ws->ordered_cur = index;

  next_id = ws->ordered_team_ids[index];
  gomp_sem_post (team->ordered_release[next_id]);
}

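/* Continuing the illustrative trace above: with the queue still full
   (ordered_team_ids = {2, 0, 3, 1}, ordered_cur = 0, ordered_num_used = 4),
   owner thread 2 calling gomp_ordered_next skips the store, since slot 0
   already holds its id; it advances ordered_cur to 1 and posts thread 0.
   Slot 0, holding id 2, is now the logical tail of the queue.  */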

/* This function is called when a statically scheduled loop is first
   being created.  Thread 0 always owns the first iteration block, so
   grant it the ordered section up front.  */

void
gomp_ordered_static_init (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  if (team == NULL || team->nthreads == 1)
    return;

  gomp_sem_post (team->ordered_release[0]);
}

/* This function is called when a statically scheduled loop is moving to
   the next allocation block.  Static schedules are not first come first
   served like the others, so ownership passes to the numerically next
   thread, not to the next thread on a list.  The work-share lock should
   *not* be held on entry.  */

void
gomp_ordered_static_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned id = thr->ts.team_id;

  if (team == NULL || team->nthreads == 1)
    return;

  ws->ordered_owner = -1;

  /* This thread currently owns the ordered section; hand it to the
     numerically next thread, wrapping back to thread 0.  */
  if (++id == team->nthreads)
    id = 0;
  ws->ordered_team_ids[0] = id;
  gomp_sem_post (team->ordered_release[id]);
}

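/* An illustrative example (editorial, not from the original sources):
   with three threads and schedule(static,1), iterations 0, 1, 2, 3, ...
   belong to threads 0, 1, 2, 0, ... in turn, so the ordered section
   simply rotates 0 -> 1 -> 2 -> 0.  gomp_ordered_static_next implements
   that rotation directly, storing the next owner in ordered_team_ids[0]
   rather than maintaining a full queue.  */
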
/* This function is called when we need to assert that the thread owns the
   ordered section.  Due to the problem of posted-but-not-waited semaphores,
   this needs to happen before completing a loop iteration.  */

void
gomp_ordered_sync (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;

  /* Work share constructs can be orphaned.  But this clearly means that
     we are the only thread, and so we automatically own the section.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* ??? I believe it to be safe to access this data without taking the
     ws->lock.  The only presumed race condition is with the previous
     thread on the queue incrementing ordered_cur such that it points
     to us, concurrently with our check below.  But our team_id is
     already present in the queue, and the other thread will always
     post to our release semaphore.  So the two cases are that we will
     either win the race and momentarily block on the semaphore, or lose
     the race and find the semaphore already unlocked and so not block.
     Either way we get correct results.  */

  if (ws->ordered_owner != thr->ts.team_id)
    {
      gomp_sem_wait (team->ordered_release[thr->ts.team_id]);
      ws->ordered_owner = thr->ts.team_id;
    }
}

/* This function is called by user code when encountering the start of an
   ORDERED block.  We must check to see if the current thread is at the
   head of the queue, and if not, block.  */

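/* A rough sketch of the code a compiler emits for an ordered dynamic
   loop (an editorial illustration; the chunk-allocation entry points
   live in loop.c, and compute and emit are hypothetical):

       if (GOMP_loop_ordered_dynamic_start (0, n, 1, chunk, &s, &e))
	 do
	   for (i = s; i < e; i++)
	     {
	       compute (i);	       // unordered part of the body
	       GOMP_ordered_start ();  // block until we own the section
	       emit (i);	       // ordered part, runs in loop order
	       GOMP_ordered_end ();
	     }
	 while (GOMP_loop_ordered_dynamic_next (&s, &e));
       GOMP_loop_end ();  */
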
#ifdef HAVE_ATTRIBUTE_ALIAS
extern void GOMP_ordered_start (void)
	__attribute__((alias ("gomp_ordered_sync")));
#else
void
GOMP_ordered_start (void)
{
  gomp_ordered_sync ();
}
#endif

/* This function is called by user code when encountering the end of an
   ORDERED block.  With the current ORDERED implementation there's nothing
   for us to do.

   However, the current implementation has a flaw in that it does not allow
   the next thread into the ORDERED section immediately after the current
   thread exits the ORDERED section in its last iteration.  The existence
   of this function allows the implementation to change.  */

void
GOMP_ordered_end (void)
{
}