// -*- C++ -*- header.

// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_0.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)

  // 0 == __atomic0 == Never lock-free
namespace __atomic0
{
  struct atomic_flag;

  // Implementation-specific defines.  Because __atomic0 is never
  // lock-free, each macro below serializes access through a guard flag:
  // __atomic_flag_for_address maps the object's address to a shared
  // __atomic_flag_base, __atomic_flag_wait_explicit spins until that
  // guard is acquired, and atomic_flag_clear_explicit releases it once
  // the operation on *__p is done.
#define _ATOMIC_LOAD_(__a, __x)						   \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;			   \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		   \
    __atomic_flag_wait_explicit(__g, __x);				   \
    __typeof__ _ATOMIC_MEMBER_ __r = *__p;				   \
    atomic_flag_clear_explicit(__g, __x);				   \
    __r; })

#define _ATOMIC_STORE_(__a, __n, __x)					   \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;			   \
    __typeof__(__n) __w = (__n);					   \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		   \
    __atomic_flag_wait_explicit(__g, __x);				   \
    *__p = __w;								   \
    atomic_flag_clear_explicit(__g, __x);				   \
    __w; })

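  // NB: _ATOMIC_MODIFY_ receives an operator token (=, +=, -=, &=, |=
  // or ^=) as __o and splices it into "*__p __o __w;", so this single
  // macro implements exchange() and every fetch_<op> member under the
  // same acquire/operate/release guard protocol as the macros above.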
#define _ATOMIC_MODIFY_(__a, __o, __n, __x)				   \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;			   \
    __typeof__(__n) __w = (__n);					   \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		   \
    __atomic_flag_wait_explicit(__g, __x);				   \
    __typeof__ _ATOMIC_MEMBER_ __r = *__p;				   \
    *__p __o __w;							   \
    atomic_flag_clear_explicit(__g, __x);				   \
    __r; })

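  // NB: _ATOMIC_CMPEXCHNG_ performs the compare-and-exchange under the
  // same guard flag: __e is the address of the expected value.  On a
  // match the new value __w is stored and the result is true; on a
  // mismatch the expected value is overwritten with the value actually
  // observed and the result is false, as compare_exchange_* requires.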
#define _ATOMIC_CMPEXCHNG_(__a, __e, __n, __x)				   \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;			   \
    __typeof__(__e) __q = (__e);					   \
    __typeof__(__n) __w = (__n);					   \
    bool __r;								   \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		   \
    __atomic_flag_wait_explicit(__g, __x);				   \
    __typeof__ _ATOMIC_MEMBER_ __t__ = *__p;				   \
    if (__t__ == *__q) { *__p = __w; __r = true; }			   \
    else { *__q = __t__; __r = false; }					   \
    atomic_flag_clear_explicit(__g, __x);				   \
    __r; })

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Converting constructor from bool, so ATOMIC_FLAG_INIT can
    // initialize an atomic_flag.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst);

    void
    clear(memory_order __m = memory_order_seq_cst);
  };
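
  // Illustrative usage sketch, not part of the original header: the
  // test_and_set/clear pair above is sufficient for a spin lock, the
  // canonical use of atomic_flag.  Assumes the std-level wrappers over
  // this __atomic0 implementation:
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void enter() { while (lock.test_and_set(std::memory_order_acquire)) { } }
  //   void leave() { lock.clear(std::memory_order_release); }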

  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) volatile = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const
    { return false; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst)
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
			  memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
			  memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
				   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
			    memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
			    memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
				     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    {
      void** __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    {
      void** __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    operator void*() const
    { return load(); }

    void*
    operator=(void* __v)
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d)
    { return fetch_add(__d) + __d; }

    void*
    operator-=(ptrdiff_t __d)
    { return fetch_sub(__d) - __d; }
  };
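
  // Illustrative usage sketch, not part of the original header:
  // atomic_address steps the stored pointer by raw bytes (note the
  // char* cast in fetch_add above), so advancing by one element of
  // type T takes fetch_add(sizeof(T)).
  //
  //   int buf[4] = { 0, 1, 2, 3 };
  //   std::atomic_address a(buf);
  //   void* prev = a.fetch_add(sizeof(int));  // prev == &buf[0]
  //   // a.load() now yields &buf[1].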


  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp	__integral_type;

      __integral_type	_M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __integral_type convertible to _M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const
      { return load(); }

      __integral_type
      operator=(__integral_type __i)
      {
	store(__i);
	return __i;
      }

      __integral_type
      operator++(int)
      { return fetch_add(1); }

      __integral_type
      operator--(int)
      { return fetch_sub(1); }

      __integral_type
      operator++()
      { return fetch_add(1) + 1; }

      __integral_type
      operator--()
      { return fetch_sub(1) - 1; }

      __integral_type
      operator+=(__integral_type __i)
      { return fetch_add(__i) + __i; }

      __integral_type
      operator-=(__integral_type __i)
      { return fetch_sub(__i) - __i; }

      __integral_type
      operator&=(__integral_type __i)
      { return fetch_and(__i) & __i; }

      __integral_type
      operator|=(__integral_type __i)
      { return fetch_or(__i) | __i; }

      __integral_type
      operator^=(__integral_type __i)
      { return fetch_xor(__i) ^ __i; }

      bool
      is_lock_free() const
      { return false; }

      void
      store(__integral_type __i, memory_order __m = memory_order_seq_cst)
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	_ATOMIC_STORE_(this, __i, __m);
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	return _ATOMIC_LOAD_(this, __m);
      }

      __integral_type
      exchange(__integral_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
			    memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
			    memory_order __m = memory_order_seq_cst)
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
			      memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
			      memory_order __m = memory_order_seq_cst)
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __integral_type
      fetch_sub(__integral_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __integral_type
      fetch_and(__integral_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __integral_type
      fetch_or(__integral_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __integral_type
      fetch_xor(__integral_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };
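
  // Illustrative usage sketch, not part of the original header: the
  // named integral types listed above (atomic_int and friends) are
  // defined elsewhere in terms of __atomic_base, so a shared counter
  // reduces to the locked fetch_add above.
  //
  //   std::atomic_int count(0);
  //   count.fetch_add(1, std::memory_order_relaxed);  // locked +=, returns 0
  //   int seen = count.load();                        // locked read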


  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool>	_M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) volatile = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i)
    { return _M_base.operator=(__i); }

    operator bool() const
    { return _M_base.load(); }

    bool
    is_lock_free() const
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst)
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst)
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
			  memory_order __m2)
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
			  memory_order __m = memory_order_seq_cst)
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
			    memory_order __m2)
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
			    memory_order __m = memory_order_seq_cst)
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
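
  // Illustrative usage sketch, not part of the original header:
  // compare_exchange_strong on atomic_bool makes a one-shot latch;
  // exactly one caller observes the false-to-true transition.
  //
  //   std::atomic_bool done(false);
  //   bool expected = false;
  //   if (done.compare_exchange_strong(expected, true))
  //     run_once();  // hypothetical one-time action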

#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_
} // namespace __atomic0

// _GLIBCXX_END_NAMESPACE

#endif