/* Threads compatibility routines for libgcc2 and libobjc for VxWorks.  */
/* Compile this one with gcc.  */
/* Copyright (C) 1997-2020 Free Software Foundation, Inc.
   Contributed by Mike Stump <mrs@wrs.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_GTHR_VXWORKS_H
#define GCC_GTHR_VXWORKS_H

#ifdef _LIBOBJC

/* libobjc requires the optional pthreads component.  */
#include "gthr-posix.h"

#else

#include <vxWorks.h>
#include <_vxworks-versions.h>

/* Some VxWorks headers profusely use typedefs of a pointer to a function
   with an undefined number of arguments.  */
#pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstrict-prototypes"
  #include <semLib.h>
#pragma GCC diagnostic pop

#include <errnoLib.h>


/* --------------------- Test & Set/Swap internal API --------------------- */

/* We use a bare atomic primitive with busy loops to handle mutual exclusion.
   Inefficient, but reliable.  The actual primitive used depends on the mode
   (RTP vs Kernel) and the version of VxWorks.  We define a macro and a type
   here, for reuse without conditionals cluttering the code afterwards.  An
   illustrative usage sketch follows the definitions below.  */

/* RTP, pre 6.9.  */

#if defined(__RTP__) && _VXWORKS_PRE(6,9)

#define __TAS(x) vxCas ((x), 0, 1)
typedef volatile unsigned char __vx_tas_t;

#endif

/* RTP, 6.9 and beyond.  */

#if defined(__RTP__) && !_VXWORKS_PRE(6,9)

#define __TAS(x) vxAtomicCas ((x), 0, 1)
typedef atomic_t __vx_tas_t;

/* Our implementation needs the system headers to access the vxAtomic
   primitives.  Other includers do not, and could actually be incompatible
   with this inclusion, for instance libstdc++ sources compiled in C++98
   mode while AtomicLib requires at least C++11 for C++.  */

#if defined(IN_LIBGCC2)
#include <vxAtomicLib.h>
#endif

#endif

/* Kernel */

#if !defined(__RTP__)

#define __TAS(x) vxTas (x)
typedef volatile unsigned char __vx_tas_t;

#endif
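
/* Illustrative sketch only, not compiled: how the __TAS/__vx_tas_t pair
   defined above can be used to build a busy-wait critical section, in the
   spirit of what the out-of-line helpers are expected to do.  The lock
   object and the use of taskDelay (from <taskLib.h>) are hypothetical.  */
#if 0
static __vx_tas_t __example_lock = 0;

static void
__example_critical_section (void)
{
  /* Spin until the atomic test-and-set hands us ownership of the lock.  */
  while (!__TAS (&__example_lock))
    taskDelay (1);

  /* ... code needing mutual exclusion goes here ... */

  /* Releasing only requires storing the "clear" value back.  */
  __example_lock = 0;
}
#endif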

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------------ Base __GTHREADS support ----------------------- */

#define __GTHREADS 1
#define __gthread_active_p() 1

/* Mutexes are easy, except that they need to be initialized at runtime.  */

/* All VxWorks mutexes are recursive.  */
typedef SEM_ID __gthread_mutex_t;
typedef SEM_ID __gthread_recursive_mutex_t;
#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init

#define __CHECK_RESULT(result) (((result) == OK) ? OK : errnoGet())

/* If a call to the VxWorks API fails, we must propagate the errno value.  */
#define __RETURN_ERRNO_IF_NOT_OK(exp) if ((exp) != OK) return errnoGet()

/* Non-reentrant mutex implementation.  Libstdc++ expects the default
   gthread mutex to be non-reentrant.  */

static inline void
__gthread_mutex_init (__gthread_mutex_t * __mutex)
{
  if (!__mutex)
    return;
  *__mutex = semBCreate (SEM_Q_PRIORITY, SEM_FULL);
}

static inline int
__gthread_mutex_destroy (__gthread_mutex_t * __mutex)
{
  if (!__mutex)
    return ERROR;
  return __CHECK_RESULT (semDelete (*__mutex));
}

static inline int
__gthread_mutex_lock (__gthread_mutex_t * __mutex)
{
  if (!__mutex)
    return ERROR;
  return __CHECK_RESULT (semTake (*__mutex, WAIT_FOREVER));
}

static inline int
__gthread_mutex_trylock (__gthread_mutex_t * __mutex)
{
  if (!__mutex)
    return ERROR;
  return __CHECK_RESULT (semTake (*__mutex, NO_WAIT));
}

static inline int
__gthread_mutex_unlock (__gthread_mutex_t * __mutex)
{
  if (!__mutex)
    return ERROR;
  return __CHECK_RESULT (semGive (*__mutex));
}

/* Recursive mutex implementation.  The only change is that we use
   semMCreate() instead of semBCreate().  */

static inline void
__gthread_recursive_mutex_init (__gthread_recursive_mutex_t * __mutex)
{
  if (!__mutex)
    return;
  *__mutex =
    semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
}

static inline int
__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t * __mutex)
{
  return __gthread_mutex_destroy (__mutex);
}

static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t * __mutex)
{
  return __gthread_mutex_lock (__mutex);
}

static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t * __mutex)
{
  return __gthread_mutex_trylock (__mutex);
}

static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t * __mutex)
{
  return __gthread_mutex_unlock (__mutex);
}

typedef struct
{
  /* PPC's test-and-set kernel mode implementation requires a pointer-aligned
     object, of which it only sets the first byte.  We use padding in addition
     to an alignment request here to maximise the factors leading to the
     desired actual alignment choice by the compiler.  */
#if defined(__PPC__)
  __attribute ((aligned (__alignof__ (void *))))
#endif

  __vx_tas_t busy;
  volatile unsigned char done;

#if !defined(__RTP__) && defined(__PPC__)
  unsigned char pad1;
  unsigned char pad2;
#endif
#if !defined(__RTP__) && defined(__PPC64__)
  unsigned char pad3;
  unsigned char pad4;
  unsigned char pad5;
  unsigned char pad6;
#endif
} __gthread_once_t;

#define __GTHREAD_ONCE_INIT {}

extern int __gthread_once (__gthread_once_t *__once, void (*__func)(void));
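
/* Illustrative sketch only, not compiled: typical use of the once API to
   perform lazy one-time initialization.  The guard variable and the
   init_once_example callback are hypothetical.  */
#if 0
static __gthread_once_t once_guard = __GTHREAD_ONCE_INIT;

static void
init_once_example (void)
{
  /* One-time setup; runs in exactly one of the competing threads.  */
}

static void
lazy_init (void)
{
  /* Every caller returns only after init_once_example has completed.  */
  __gthread_once (&once_guard, init_once_example);
}
#endif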

/* All the TSD routines are sufficiently complex that they
   need to be implemented out of line.  */

typedef unsigned int __gthread_key_t;

extern int __gthread_key_create (__gthread_key_t *__keyp,
				 void (*__dtor)(void *));
extern int __gthread_key_delete (__gthread_key_t __key);

extern void *__gthread_getspecific (__gthread_key_t __key);
extern int __gthread_setspecific (__gthread_key_t __key, void *__ptr);
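
/* Illustrative sketch only, not compiled: per-thread storage through the TSD
   API declared above.  The key variable, the destructor and the use of
   malloc/free (from <stdlib.h>) are hypothetical.  */
#if 0
static __gthread_key_t buffer_key;

static void
buffer_destroy (void *ptr)
{
  /* Called for each thread's non-null value when the thread exits.  */
  free (ptr);
}

static void *
get_thread_buffer (void)
{
  /* Assumes __gthread_key_create (&buffer_key, buffer_destroy) was called
     once, e.g. through __gthread_once, before the first use.  */
  void *buf = __gthread_getspecific (buffer_key);
  if (!buf)
    {
      buf = malloc (64);
      __gthread_setspecific (buffer_key, buf);
    }
  return buf;
}
#endif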

/* ------------------ Base condition variables support ------------------- */

#define __GTHREAD_HAS_COND 1

typedef SEM_ID __gthread_cond_t;

#define __GTHREAD_COND_INIT_FUNCTION __gthread_cond_init

/* Condition variable declarations.  */

extern void __gthread_cond_init (__gthread_cond_t *cond);

extern int __gthread_cond_destroy (__gthread_cond_t *cond);

extern int __gthread_cond_broadcast (__gthread_cond_t *cond);

extern int __gthread_cond_wait (__gthread_cond_t *cond,
				__gthread_mutex_t *mutex);

extern int __gthread_cond_wait_recursive (__gthread_cond_t *cond,
					  __gthread_recursive_mutex_t *mutex);
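
/* Illustrative sketch only, not compiled: the usual predicate loop around
   __gthread_cond_wait.  The mutex/condition pair and queue_empty are
   hypothetical, and both synchronization objects are assumed to have been
   initialized with their respective INIT_FUNCTION hooks.  */
#if 0
static __gthread_mutex_t queue_mutex;
static __gthread_cond_t queue_cond;

static void
consume_one (void)
{
  __gthread_mutex_lock (&queue_mutex);

  /* Re-check the predicate after every wakeup: the wait atomically releases
     the mutex while blocking and re-acquires it before returning.  */
  while (queue_empty ())
    __gthread_cond_wait (&queue_cond, &queue_mutex);

  /* ... pop an element while still holding the mutex ... */

  __gthread_mutex_unlock (&queue_mutex);
}
#endif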

/* -----------------------  C++0x thread support ------------------------- */

/* We do not support C++0x threads on VxWorks 653, which we can recognize
   by VTHREADS being defined.  */

#ifndef VTHREADS

#define __GTHREADS_CXX0X 1

#include <limits.h>
#include <time.h>
#include <tickLib.h>
#include <sysLib.h>
#include <version.h>

typedef struct
{
  TASK_ID task_id;
  void *return_value;

  /* This mutex is used to block in join() while the return value is
     unavailable.  */
  __gthread_mutex_t return_value_available;

  /* Before freeing the structure in the task wrapper, we need to wait until
     join() or detach() is called on that thread.  */
  __gthread_mutex_t delete_ok;
} __gthread_tcb;

typedef __gthread_tcb *__gthread_t;

/* Typedefs specific to different VxWorks versions.  */
#if _VXWORKS_PRE(6,9)
  typedef int _Vx_usr_arg_t;
  #define TASK_ID_NULL ((TASK_ID)NULL)
  #define SEM_ID_NULL ((SEM_ID)NULL)
#endif

typedef struct timespec __gthread_time_t;

/* Timed mutex lock declarations.  */

extern int __gthread_mutex_timedlock (__gthread_mutex_t *m,
				      const __gthread_time_t *abs_time);

extern int __gthread_recursive_mutex_timedlock
  (__gthread_recursive_mutex_t *mutex,
   const __gthread_time_t *abs_timeout);

/* Timed condition variable declarations.  */

extern int __gthread_cond_signal (__gthread_cond_t *cond);
extern int __gthread_cond_timedwait (__gthread_cond_t *cond,
				     __gthread_mutex_t *mutex,
				     const __gthread_time_t *abs_timeout);
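
/* Illustrative sketch only, not compiled: the timed variants take an absolute
   deadline expressed as a __gthread_time_t (struct timespec).  This assumes
   the deadline is measured against CLOCK_REALTIME, the usual gthreads
   convention; the 2-second offset and the helper name are hypothetical.  */
#if 0
static int
lock_with_deadline (__gthread_mutex_t *mutex)
{
  __gthread_time_t deadline;

  /* Build "now + 2 seconds" and hand it to the timed lock.  */
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 2;

  return __gthread_mutex_timedlock (mutex, &deadline);
}
#endif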

/* gthreads declarations.  */

extern int __gthread_equal (__gthread_t t1, __gthread_t t2);
extern int __gthread_yield (void);
extern int __gthread_create (__gthread_t *__threadid,
			     void *(*__func) (void*),
			     void *__args);
extern int __gthread_join (__gthread_t thread, void **value_ptr);
extern int __gthread_detach (__gthread_t thread);

extern __gthread_t __gthread_self (void);
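
/* Illustrative sketch only, not compiled: creating a thread and collecting
   its return value.  The worker function and its argument are hypothetical.
   As the TCB comments above describe, a created thread must eventually be
   either joined or detached so its bookkeeping structure can be freed.  */
#if 0
static void *
worker (void *arg)
{
  /* ... do some work ... */
  return arg;
}

static void
run_worker (void)
{
  __gthread_t thread;
  void *result;

  if (__gthread_create (&thread, worker, (void *) 0) != 0)
    return;

  /* Call either __gthread_join or __gthread_detach, but not both.  */
  __gthread_join (thread, &result);
}
#endif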

#endif

#ifdef __cplusplus
}
#endif

#endif /* not _LIBOBJC */

#endif /* gthr-vxworks.h */