1// <mutex> -*- C++ -*-
2
3// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4// Free Software Foundation, Inc.
5//
6// This file is part of the GNU ISO C++ Library.  This library is free
7// software; you can redistribute it and/or modify it under the
8// terms of the GNU General Public License as published by the
9// Free Software Foundation; either version 3, or (at your option)
10// any later version.
11
12// This library is distributed in the hope that it will be useful,
13// but WITHOUT ANY WARRANTY; without even the implied warranty of
14// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15// GNU General Public License for more details.
16
17// Under Section 7 of GPL version 3, you are granted additional
18// permissions described in the GCC Runtime Library Exception, version
19// 3.1, as published by the Free Software Foundation.
20
21// You should have received a copy of the GNU General Public License and
22// a copy of the GCC Runtime Library Exception along with this program;
23// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
24// <http://www.gnu.org/licenses/>.
25
26/** @file mutex
27 *  This is a Standard C++ Library header.
28 */
29
30#ifndef _GLIBCXX_MUTEX
31#define _GLIBCXX_MUTEX 1
32
33#pragma GCC system_header
34
35#ifndef __GXX_EXPERIMENTAL_CXX0X__
36# include <bits/c++0x_warning.h>
37#else
38
39#include <tuple>
40#include <cstddef>
41#include <chrono>
42#include <exception>
43#include <type_traits>
44#include <functional>
45#include <system_error>
46#include <bits/functexcept.h>
47#include <bits/gthr.h>
48#include <bits/move.h> // for std::swap
49
50#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
51
52namespace std
53{
54  /**
55   * @defgroup mutexes Mutexes
56   * @ingroup concurrency
57   *
58   * Classes for mutex support.
59   * @{
60   */
61
  /// mutex
  ///
  /// The standard non-recursive mutex: a thin wrapper around the
  /// gthreads mutex primitive.  Not copyable or assignable; ownership
  /// is manipulated only through lock()/try_lock()/unlock().
  class mutex
  {
    typedef __gthread_mutex_t			__native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* 			native_handle_type;

    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_MUTEX_INIT
      // Target provides a static initializer macro; it cannot be used
      // directly on a member, so copy it in through a temporary.
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: use the runtime hook.
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    /// Block until the mutex is acquired.
    /// Throws system_error with the gthread error code on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking.
    /// Returns true on success; any gthread error maps to false.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Release the mutex; errors from the primitive are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthread mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
113
  /// recursive_mutex
  ///
  /// Like mutex, but the owning thread may lock it repeatedly; it is
  /// released when unlock() has been called as many times as lock().
  /// Wraps the gthreads recursive-mutex primitive.
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t		__native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      // Static initializer macro cannot be applied to a member
      // directly; copy it through a temporary.
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: use the runtime hook.
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    /// Block until the mutex is acquired (recursively re-enterable).
    /// Throws system_error with the gthread error code on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking.
    /// Returns true on success; any gthread error maps to false.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Decrement the lock count; errors from the primitive are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthread recursive mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
165
  /// timed_mutex
  ///
  /// Non-recursive mutex that additionally supports timed acquisition
  /// through try_lock_for() and try_lock_until().
  class timed_mutex
  {
    typedef __gthread_mutex_t 		  	__native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    // Clock used to convert relative waits into absolute deadlines.
    typedef chrono::monotonic_clock 	  	__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex()
    {
#ifdef __GTHREAD_MUTEX_INIT
      // Static initializer macro cannot be applied to a member
      // directly; copy it through a temporary.
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    /// Block until the mutex is acquired.
    /// Throws system_error with the gthread error code on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire the mutex, waiting at most @a __rtime.
    /// Dispatches on whether __clock_t can represent _Period exactly.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    /// Try to acquire the mutex until the absolute time @a __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// Split the deadline into whole seconds ...
	chrono::time_point<_Clock, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	// ... plus the sub-second remainder in nanoseconds.
	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	// NOTE(review): the raw epoch count of _Clock is handed to the
	// gthread timed-lock, which interprets the deadline against its
	// own clock (CLOCK_REALTIME on POSIX); if _Clock has a
	// different epoch the wait may be wrong -- confirm per target.
	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    /// Release the mutex; errors from the primitive are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthread mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Overload chosen when __clock_t ticks at least as finely as
    // _Period: duration_cast cannot shorten the requested wait.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Overload chosen when __clock_t is coarser than _Period:
    // duration_cast truncates, so pre-increment the converted
    // duration by one tick to round up and never wait for less than
    // the requested time.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
269
  /// recursive_timed_mutex
  ///
  /// Recursive mutex that additionally supports timed acquisition
  /// through try_lock_for() and try_lock_until().
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t		__native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    // Clock used to convert relative waits into absolute deadlines.
    typedef chrono::monotonic_clock 		__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      // Static initializer macro cannot be applied to a member
      // directly; copy it through a temporary.
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    /// Block until the mutex is acquired (recursively re-enterable).
    /// Throws system_error with the gthread error code on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire the mutex, waiting at most @a __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    /// Try to acquire the mutex until the absolute time @a __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// Split the deadline into whole seconds ...
	chrono::time_point<_Clock, chrono::seconds>  __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	// ... plus the sub-second remainder in nanoseconds.
	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	// NOTE(review): as in timed_mutex, the deadline is interpreted
	// against the gthread implementation's own clock -- confirm
	// _Clock's epoch matches on the target.
	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    /// Decrement the lock count; errors from the primitive are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthread recursive mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Overload chosen when __clock_t ticks at least as finely as
    // _Period: duration_cast cannot shorten the requested wait.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Overload chosen when __clock_t is coarser than _Period:
    // pre-increment rounds the truncated conversion up by one tick so
    // the wait is never shorter than requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
374
  /// Tag type: do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Tag type: try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Tag type: assume the calling thread has already obtained mutex
  /// ownership and manage it.
  struct adopt_lock_t { };

  // Tag objects (defined in the library) used to select the
  // corresponding lock_guard/unique_lock constructors.
  extern const defer_lock_t	defer_lock;
  extern const try_to_lock_t	try_to_lock;
  extern const adopt_lock_t	adopt_lock;
388
389  /// @brief  Scoped lock idiom.
390  // Acquire the mutex here with a constructor call, then release with
391  // the destructor call in accordance with RAII style.
392  template<typename _Mutex>
393    class lock_guard
394    {
395    public:
396      typedef _Mutex mutex_type;
397
398      explicit lock_guard(mutex_type& __m) : _M_device(__m)
399      { _M_device.lock(); }
400
401      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
402      { } // calling thread owns mutex
403
404      ~lock_guard()
405      { _M_device.unlock(); }
406
407      lock_guard(const lock_guard&) = delete;
408      lock_guard& operator=(const lock_guard&) = delete;
409
410    private:
411      mutex_type&  _M_device;
412    };
413
  /// unique_lock
  ///
  /// Movable mutex-ownership wrapper.  An instance may reference no
  /// mutex, reference one without owning its lock, or own the lock;
  /// the ownership state is tracked in _M_owns and the lock is
  /// released on destruction only when owned.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      /// Construct with no associated mutex and no ownership.
      unique_lock()
      : _M_device(0), _M_owns(false)
      { }

      /// Associate with @a __m and block until the lock is acquired.
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      /// Associate with @a __m without locking it.
      unique_lock(mutex_type& __m, defer_lock_t)
      : _M_device(&__m), _M_owns(false)
      { }

      /// Associate with @a __m and try to lock it without blocking.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      /// Adopt @a __m, which the calling thread must already own.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      /// Associate with @a __m and try to lock until @a __atime.
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      /// Associate with @a __m and try to lock within @a __rtime.
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      /// Release the lock if (and only if) it is owned.
      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      /// Move construction transfers the mutex and ownership flag,
      /// leaving the source empty.
      unique_lock(unique_lock&& __u)
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      /// Move assignment: release any owned lock, then take over the
      /// source's state via move-construction and member swap.
      unique_lock& operator=(unique_lock&& __u)
      {
	if(_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	// Redundant with the move constructor invoked above, but
	// harmless: make doubly sure the source is left empty.
	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      /// Block until the associated mutex is locked.
      /// Throws system_error: operation_not_permitted when no mutex is
      /// associated, resource_deadlock_would_occur when already owned.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      /// Try to lock the associated mutex without blocking.
      /// Throws the same system_error conditions as lock().
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      /// Try to lock the associated mutex until @a __atime.
      /// Throws the same system_error conditions as lock().
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      /// Try to lock the associated mutex within @a __rtime.
      /// Throws the same system_error conditions as lock().
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	 }

      /// Unlock the mutex if owned; throws operation_not_permitted
      /// when called without owning the lock.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      /// Exchange mutex pointer and ownership state with @a __u.
      void
      swap(unique_lock& __u)
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      /// Disassociate from the mutex WITHOUT unlocking it; the caller
      /// becomes responsible for the (possibly held) lock.
      mutex_type*
      release()
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      /// True if this object owns the lock on its mutex.
      bool
      owns_lock() const
      { return _M_owns; }

      /// Equivalent to owns_lock().
      explicit operator bool() const
      { return owns_lock(); }

      /// The associated mutex, or null if none.
      mutex_type*
      mutex() const
      { return _M_device; }

    private:
      mutex_type*	_M_device;  // associated mutex (may be null)
      bool		_M_owns; // XXX use atomic_bool
    };
588
  /// Exchange the states (mutex pointer and ownership flag) of two
  /// unique_lock objects; delegates to the member swap.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }
593
  // Recursive helper for the generic try_lock: unlocks the lockables
  // in __locks at indices _Idx, _Idx - 1, ..., 0, in that order.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };
605
  // Recursion terminator: index -1 means nothing is left to unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };
614
615  template<int _Idx, bool _Continue = true>
616    struct __try_lock_impl
617    {
618      template<typename... _Lock>
619	static int
620	__do_try_lock(tuple<_Lock&...>& __locks)
621	{
622	  if(std::get<_Idx>(__locks).try_lock())
623	    {
624	      return __try_lock_impl<_Idx + 1,
625		_Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks);
626	    }
627	  else
628	    {
629	      __unlock_impl<_Idx>::__do_unlock(__locks);
630	      return _Idx;
631	    }
632	}
633    };
634
635  template<int _Idx>
636    struct __try_lock_impl<_Idx, false>
637    {
638      template<typename... _Lock>
639	static int
640	__do_try_lock(tuple<_Lock&...>& __locks)
641	{
642	  if(std::get<_Idx>(__locks).try_lock())
643	    return -1;
644	  else
645	    {
646	      __unlock_impl<_Idx>::__do_unlock(__locks);
647	      return _Idx;
648	    }
649	}
650    };
651
652  /** @brief Generic try_lock.
653   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
654   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
655   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
656   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
657   *          a 0-based index corresponding to the argument that returned false.
658   *  @post Either all arguments are locked, or none will be.
659   *
660   *  Sequentially calls try_lock() on each argument.
661   */
662  template<typename _Lock1, typename _Lock2, typename... _Lock3>
663    int
664    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
665    {
666      tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
667      return __try_lock_impl<0>::__do_try_lock(__locks);
668    }
669
  /// lock
  ///
  /// Lock all arguments, blocking as needed (declaration only; the
  /// definition is provided elsewhere).
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1&, _L2&, _L3&...);
674
  /// once_flag
  ///
  /// State object passed by reference to call_once to guarantee that
  /// the supplied callable runs at most once.  Wraps the gthreads
  /// once-control object; not copyable.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once;

  public:
    /// Construct in the "not yet called" state.
    once_flag()
    {
      // The static initializer macro cannot be applied to a member
      // directly; copy it through a temporary.
      __native_type __tmp = __GTHREAD_ONCE_INIT;
      _M_once = __tmp;
    }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable __f, _Args&&... __args);
  };
696
697#ifdef _GLIBCXX_HAVE_TLS
698  extern __thread void* __once_callable;
699  extern __thread void (*__once_call)();
700
701  template<typename _Callable>
702    inline void
703    __once_call_impl()
704    {
705      (*(_Callable*)__once_callable)();
706    }
707#else
708  extern function<void()> __once_functor;
709
710  extern void
711  __set_once_functor_lock_ptr(unique_lock<mutex>*);
712
713  extern mutex&
714  __get_once_mutex();
715#endif
716
717  extern "C" void __once_proxy();
718
  /// call_once
  ///
  /// Invoke __f(__args...) exactly once across all threads that share
  /// the same once_flag, using __gthread_once.  Throws system_error
  /// with the gthread error code if the once primitive fails.
  //
  // NOTE(review): the arguments are bound by copy (std::forward is not
  // used), so rvalue arguments are copied rather than moved -- confirm
  // whether forwarding is intended here.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // Publish the bound call through the thread-local globals read
      // by __once_proxy.  NOTE(review): both pointers refer to this
      // frame's __bound_functor and are not reset on return -- they
      // are stale once this function exits; verify __once_proxy only
      // dereferences them during the __gthread_once call below.
      auto __bound_functor = std::bind<void>(__f, __args...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: stash the bound call in the library-global functor
      // while holding the once mutex, and hand __once_proxy a pointer
      // to our lock.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(__f, __args...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If we still own the lock (presumably __once_proxy releases it
      // when it actually runs), detach the stored lock pointer.
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
744
745  // @} group mutexes
746}
747
748#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
749
750#endif // __GXX_EXPERIMENTAL_CXX0X__
751
752#endif // _GLIBCXX_MUTEX
753