/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_STACK_H
#define CK_STACK_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_stack_entry {
	struct ck_stack_entry *next;
};
typedef struct ck_stack_entry ck_stack_entry_t;

struct ck_stack {
	struct ck_stack_entry *head;
	char *generation CK_CC_PACKED;
} CK_CC_ALIASED;
typedef struct ck_stack ck_stack_t;

#define CK_STACK_INITIALIZER { NULL, NULL }

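/*
 * Usage sketch (illustrative, not part of this header's API): entries are
 * embedded in caller-owned objects, and a stack is initialized either
 * statically with CK_STACK_INITIALIZER or at run-time with ck_stack_init().
 * The struct and variable names below are hypothetical.
 *
 *	struct node {
 *		int value;
 *		ck_stack_entry_t entry;
 *	};
 *
 *	static ck_stack_t stack = CK_STACK_INITIALIZER;
 *
 *	static void
 *	setup(void)
 *	{
 *
 *		ck_stack_init(&stack);
 *		return;
 *	}
 */
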
#ifndef CK_F_STACK_PUSH_UPMC
#define CK_F_STACK_PUSH_UPMC
/*
 * Stack producer operation safe for multiple unique producers and multiple consumers.
 */
CK_CC_INLINE static void
ck_stack_push_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
{
	struct ck_stack_entry *stack;

	stack = ck_pr_load_ptr(&target->head);
	entry->next = stack;
	ck_pr_fence_store();

	while (ck_pr_cas_ptr_value(&target->head, stack, entry, &stack) == false) {
		entry->next = stack;
		ck_pr_fence_store();
	}

	return;
}
#endif /* CK_F_STACK_PUSH_UPMC */

#ifndef CK_F_STACK_TRYPUSH_UPMC
#define CK_F_STACK_TRYPUSH_UPMC
/*
 * Stack producer operation for multiple unique producers and multiple consumers.
 * Returns true on success and false on failure.
 */
CK_CC_INLINE static bool
ck_stack_trypush_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
{
	struct ck_stack_entry *stack;

	stack = ck_pr_load_ptr(&target->head);
	entry->next = stack;
	ck_pr_fence_store();

	return ck_pr_cas_ptr(&target->head, stack, entry);
}
#endif /* CK_F_STACK_TRYPUSH_UPMC */
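
/*
 * Producer sketch (illustrative; "struct node" is a hypothetical caller type
 * embedding a ck_stack_entry_t member named "entry"): ck_stack_push_upmc()
 * retries its compare-and-swap until the push succeeds, while
 * ck_stack_trypush_upmc() attempts it once and returns false on contention
 * so the caller may back off.
 *
 *	static void
 *	produce(ck_stack_t *stack, struct node *n)
 *	{
 *
 *		ck_stack_push_upmc(stack, &n->entry);
 *		return;
 *	}
 */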

#ifndef CK_F_STACK_POP_UPMC
#define CK_F_STACK_POP_UPMC
/*
 * Stack consumer operation safe for multiple unique producers and multiple consumers.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_pop_upmc(struct ck_stack *target)
{
	struct ck_stack_entry *entry, *next;

	entry = ck_pr_load_ptr(&target->head);
	if (entry == NULL)
		return NULL;

	ck_pr_fence_load();
	next = entry->next;
	while (ck_pr_cas_ptr_value(&target->head, entry, next, &entry) == false) {
		if (entry == NULL)
			break;

		ck_pr_fence_load();
		next = entry->next;
	}

	return entry;
}
#endif /* CK_F_STACK_POP_UPMC */

#ifndef CK_F_STACK_TRYPOP_UPMC
#define CK_F_STACK_TRYPOP_UPMC
/*
 * Stack consumer operation for multiple unique producers and multiple consumers.
 * Returns true on success and false on failure. The value pointed to by the second
 * argument is set to a valid ck_stack_entry_t reference if true is returned. If
 * false is returned, then the value pointed to by the second argument is undefined.
 */
CK_CC_INLINE static bool
ck_stack_trypop_upmc(struct ck_stack *target, struct ck_stack_entry **r)
{
	struct ck_stack_entry *entry;

	entry = ck_pr_load_ptr(&target->head);
	if (entry == NULL)
		return false;

	ck_pr_fence_load();
	if (ck_pr_cas_ptr(&target->head, entry, entry->next) == true) {
		*r = entry;
		return true;
	}

	return false;
}
#endif /* CK_F_STACK_TRYPOP_UPMC */
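
/*
 * Consumer sketch (illustrative; "struct node" and node_container() are
 * hypothetical, with node_container() assumed to be generated by
 * CK_STACK_CONTAINER further below): pop an entry and recover the enclosing
 * object.
 *
 *	static struct node *
 *	consume(ck_stack_t *stack)
 *	{
 *		ck_stack_entry_t *ref;
 *
 *		ref = ck_stack_pop_upmc(stack);
 *		if (ref == NULL)
 *			return NULL;
 *
 *		return node_container(ref);
 *	}
 */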

#ifndef CK_F_STACK_BATCH_POP_UPMC
#define CK_F_STACK_BATCH_POP_UPMC
/*
 * Pop all items off the stack.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_batch_pop_upmc(struct ck_stack *target)
{
	struct ck_stack_entry *entry;

	entry = ck_pr_fas_ptr(&target->head, NULL);
	ck_pr_fence_load();
	return entry;
}
#endif /* CK_F_STACK_BATCH_POP_UPMC */
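
/*
 * Draining sketch (illustrative; process() is a hypothetical callback):
 * ck_stack_batch_pop_upmc() detaches the whole stack in one atomic swap,
 * after which the returned singly-linked list is private to the caller and
 * may be walked without further synchronization.
 *
 *	static void
 *	drain(ck_stack_t *stack)
 *	{
 *		ck_stack_entry_t *cursor, *next;
 *
 *		cursor = ck_stack_batch_pop_upmc(stack);
 *		while (cursor != NULL) {
 *			next = cursor->next;
 *			process(cursor);
 *			cursor = next;
 *		}
 *
 *		return;
 *	}
 */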

#ifndef CK_F_STACK_PUSH_MPMC
#define CK_F_STACK_PUSH_MPMC
/*
 * Stack producer operation safe for multiple producers and multiple consumers.
 */
CK_CC_INLINE static void
ck_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
{

	ck_stack_push_upmc(target, entry);
	return;
}
#endif /* CK_F_STACK_PUSH_MPMC */

#ifndef CK_F_STACK_TRYPUSH_MPMC
#define CK_F_STACK_TRYPUSH_MPMC
/*
 * Stack producer operation safe for multiple producers and multiple consumers.
 */
CK_CC_INLINE static bool
ck_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
{

	return ck_stack_trypush_upmc(target, entry);
}
#endif /* CK_F_STACK_TRYPUSH_MPMC */

#ifdef CK_F_PR_CAS_PTR_2_VALUE
#ifndef CK_F_STACK_POP_MPMC
#define CK_F_STACK_POP_MPMC
/*
 * Stack consumer operation safe for multiple producers and multiple consumers.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_pop_mpmc(struct ck_stack *target)
{
	struct ck_stack original, update;

	original.generation = ck_pr_load_ptr(&target->generation);
	ck_pr_fence_load();
	original.head = ck_pr_load_ptr(&target->head);
	if (original.head == NULL)
		return NULL;

	/* Order with respect to next pointer. */
	ck_pr_fence_load();

	update.generation = original.generation + 1;
	update.head = original.head->next;

	while (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == false) {
		if (original.head == NULL)
			return NULL;

		update.generation = original.generation + 1;

		/* Order with respect to next pointer. */
		ck_pr_fence_load();
		update.head = original.head->next;
	}

	return original.head;
}
#endif /* CK_F_STACK_POP_MPMC */

#ifndef CK_F_STACK_TRYPOP_MPMC
#define CK_F_STACK_TRYPOP_MPMC
CK_CC_INLINE static bool
ck_stack_trypop_mpmc(struct ck_stack *target, struct ck_stack_entry **r)
{
	struct ck_stack original, update;

	original.generation = ck_pr_load_ptr(&target->generation);
	ck_pr_fence_load();
	original.head = ck_pr_load_ptr(&target->head);
	if (original.head == NULL)
		return false;

	update.generation = original.generation + 1;
	ck_pr_fence_load();
	update.head = original.head->next;

	if (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == true) {
		*r = original.head;
		return true;
	}

	return false;
}
#endif /* CK_F_STACK_TRYPOP_MPMC */
#endif /* CK_F_PR_CAS_PTR_2_VALUE */
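
/*
 * Note on the MPMC pop operations above: the generation counter is advanced
 * on every successful pop and is compared together with the head pointer by
 * the double-width compare-and-swap (ck_pr_cas_ptr_2_value).  This guards
 * against the ABA problem that arises when a popped entry is recycled and
 * pushed back between a consumer's read of the head and its CAS.  Because a
 * double-width CAS is required, these operations are only defined when the
 * target provides CK_F_PR_CAS_PTR_2_VALUE; callers can test the resulting
 * feature macros, as in the illustrative sketch below.
 *
 *	ck_stack_entry_t *ref = NULL;
 *
 *	#ifdef CK_F_STACK_POP_MPMC
 *	ref = ck_stack_pop_mpmc(&stack);
 *	#endif
 */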

#ifndef CK_F_STACK_BATCH_POP_MPMC
#define CK_F_STACK_BATCH_POP_MPMC
/*
 * This is equivalent to the UP/MC version as NULL does not need a
 * generation count.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_batch_pop_mpmc(struct ck_stack *target)
{

	return ck_stack_batch_pop_upmc(target);
}
#endif /* CK_F_STACK_BATCH_POP_MPMC */

#ifndef CK_F_STACK_PUSH_MPNC
#define CK_F_STACK_PUSH_MPNC
/*
 * Stack producer operation safe with no concurrent consumers.
 */
CK_CC_INLINE static void
ck_stack_push_mpnc(struct ck_stack *target, struct ck_stack_entry *entry)
{
	struct ck_stack_entry *stack;

	entry->next = NULL;
	ck_pr_fence_store_atomic();
	stack = ck_pr_fas_ptr(&target->head, entry);
	ck_pr_store_ptr(&entry->next, stack);
	ck_pr_fence_store();

	return;
}
#endif /* CK_F_STACK_PUSH_MPNC */

/*
 * Stack producer operation for a single producer and no concurrent consumers.
 */
CK_CC_INLINE static void
ck_stack_push_spnc(struct ck_stack *target, struct ck_stack_entry *entry)
{

	entry->next = target->head;
	target->head = entry;
	return;
}

/*
 * Stack consumer operation for no concurrent producers and a single consumer.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_pop_npsc(struct ck_stack *target)
{
	struct ck_stack_entry *n;

	if (target->head == NULL)
		return NULL;

	n = target->head;
	target->head = n->next;

	return n;
}

/*
 * Pop all items off a stack.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_batch_pop_npsc(struct ck_stack *target)
{
	struct ck_stack_entry *n;

	n = target->head;
	target->head = NULL;

	return n;
}

/*
 * Stack initialization function. Guarantees initialization across processors.
 */
CK_CC_INLINE static void
ck_stack_init(struct ck_stack *stack)
{

	stack->head = NULL;
	stack->generation = NULL;
	return;
}

/*
 * Defines a container_of function N(ck_stack_entry_t *) that recovers the
 * enclosing object of type T from its stack entry member M.
 */
#define CK_STACK_CONTAINER(T, M, N) CK_CC_CONTAINER(ck_stack_entry_t, T, M, N)
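
/*
 * Container sketch (illustrative; node_container is a hypothetical name, and
 * struct node embeds a ck_stack_entry_t member called "entry" as in the
 * earlier sketches): the generated function maps a pointer to the embedded
 * entry back to the object that contains it.
 *
 *	CK_STACK_CONTAINER(struct node, entry, node_container)
 *
 *	ck_stack_entry_t *ref = ck_stack_pop_upmc(&stack);
 *	struct node *n = (ref != NULL) ? node_container(ref) : NULL;
 */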

#define CK_STACK_ISEMPTY(m) ((m)->head == NULL)
#define CK_STACK_FIRST(s)   ((s)->head)
#define CK_STACK_NEXT(m)    ((m)->next)
#define CK_STACK_FOREACH(stack, entry)				\
	for ((entry) = CK_STACK_FIRST(stack);			\
	     (entry) != NULL;					\
	     (entry) = CK_STACK_NEXT(entry))
#define CK_STACK_FOREACH_SAFE(stack, entry, T)			\
	for ((entry) = CK_STACK_FIRST(stack);			\
	     (entry) != NULL && ((T) = (entry)->next, 1);	\
	     (entry) = (T))
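
/*
 * Iteration sketch (illustrative; node_container is the hypothetical
 * function from the container sketch above): CK_STACK_FOREACH walks the
 * stack starting from the most recently pushed entry, and
 * CK_STACK_FOREACH_SAFE additionally caches the next pointer in a temporary
 * so the current entry may be unlinked or freed during traversal.  These
 * macros perform plain loads, so they are best used when the stack is not
 * being mutated concurrently.
 *
 *	ck_stack_entry_t *cursor, *safe;
 *
 *	CK_STACK_FOREACH_SAFE(&stack, cursor, safe)
 *		free(node_container(cursor));
 */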

#endif /* CK_STACK_H */