/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_CLH_H
#define CK_SPINLOCK_CLH_H

#include <ck_cc.h>
#include <ck_limits.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

#ifndef CK_F_SPINLOCK_CLH
#define CK_F_SPINLOCK_CLH

struct ck_spinlock_clh {
	unsigned int wait;
	struct ck_spinlock_clh *previous;
};
typedef struct ck_spinlock_clh ck_spinlock_clh_t;
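
/*
 * The lock is a pointer to the tail of a queue of lock requests. A
 * thread acquires by appending its own node to the queue and spinning
 * on the wait flag of its predecessor's node, so each thread, as well
 * as the lock itself, must supply a node.
 */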

/*
 * Initializes the queue with a single unowned node; the lock begins
 * in the released state.
 */
CK_CC_INLINE static void
ck_spinlock_clh_init(struct ck_spinlock_clh **lock, struct ck_spinlock_clh *unowned)
{

	unowned->previous = NULL;
	unowned->wait = false;
	*lock = unowned;
	ck_pr_barrier();
	return;
}
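
/*
 * A minimal initialization sketch; the names here are illustrative,
 * not part of the interface. The unowned node seeds the queue and its
 * storage must remain valid while the lock is in use:
 *
 *	static struct ck_spinlock_clh unowned_node;
 *	static struct ck_spinlock_clh *lock;
 *
 *	ck_spinlock_clh_init(&lock, &unowned_node);
 */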

/*
 * Returns true if the lock is busy. This is a racy snapshot: the most
 * recently enqueued request's wait flag remains set until its owner
 * releases the lock.
 */
CK_CC_INLINE static bool
ck_spinlock_clh_locked(struct ck_spinlock_clh **queue)
{
	struct ck_spinlock_clh *head;
	bool r;

	head = ck_pr_load_ptr(queue);
	r = ck_pr_load_uint(&head->wait);
	ck_pr_fence_acquire();
	return r;
}

/*
 * Acquires the lock. The caller's node becomes the new queue tail and
 * the caller spins until its predecessor releases.
 */
CK_CC_INLINE static void
ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thread)
{
	struct ck_spinlock_clh *previous;

	/* Indicate to the next thread on the queue that it will have to block. */
	thread->wait = true;
	ck_pr_fence_store_atomic();

	/*
	 * Mark the current request as the last request and save a
	 * reference to the previous request.
	 */
	previous = ck_pr_fas_ptr(queue, thread);
	thread->previous = previous;

	/* Wait until the previous thread is done with the lock. */
	ck_pr_fence_load();
	while (ck_pr_load_uint(&previous->wait) == true)
		ck_pr_stall();

	ck_pr_fence_lock();
	return;
}

/*
 * Releases the lock. On return, *thread points at the predecessor's
 * node, which the caller adopts for its next acquisition; the caller's
 * old node remains in the queue for its successor.
 */
CK_CC_INLINE static void
ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
{
	struct ck_spinlock_clh *previous;

	/*
	 * If there are waiters, they are spinning on the current node's
	 * wait flag. The flag is cleared so that the successor may
	 * complete an acquisition. If the caller is preempted then the
	 * predecessor field may be updated by a successor's lock
	 * operation. In order to avoid this, save a copy of the
	 * predecessor before clearing the flag.
	 */
	previous = (*thread)->previous;

	/*
	 * We have to pay the cost of this fence anyway, so use it as a
	 * compiler barrier too.
	 */
	ck_pr_fence_unlock();
	ck_pr_store_uint(&(*thread)->wait, false);

	/*
	 * The predecessor is guaranteed not to be spinning on its old
	 * node any longer, so the caller may adopt that node. This gives
	 * the successor all the time in the world to read the updated
	 * wait flag from the node the caller just surrendered.
	 */
	*thread = previous;
	return;
}
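
/*
 * A minimal round trip, assuming one node per thread (names are
 * illustrative). Note that unlock hands the predecessor's node back
 * through the pointer, so the caller must keep using the updated
 * pointer rather than its original node:
 *
 *	struct ck_spinlock_clh *me = malloc(sizeof(*me));
 *
 *	ck_spinlock_clh_lock(&lock, me);
 *	...critical section...
 *	ck_spinlock_clh_unlock(&me);
 *
 * After the unlock, me no longer points at the original allocation;
 * it points at the node surrendered by the predecessor.
 */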
#endif /* CK_F_SPINLOCK_CLH */
#endif /* CK_SPINLOCK_CLH_H */