/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * (c) Copyright 2008, IBM Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This is an implementation of hazard pointers as detailed in:
 *   http://www.research.ibm.com/people/m/michael/ieeetpds-2004.pdf
 *
 * This API provides a publishing mechanism that defers destruction of
 * objects protected by hazard pointers until it is safe to do so.
 * Preventing arbitrary re-use protects against the ABA problem and
 * provides safe memory reclamation. The implementation was derived from
 * the Hazard Pointers implementation in the Amino CBBS project. It has
 * been heavily modified for Concurrency Kit.
 */
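
/*
 * Rough usage sketch (illustrative only; my_destroy, the slots array and
 * the node layout are assumptions rather than part of this file -- see
 * ck_hp.h and the regression tests for authoritative examples). A thread
 * subscribes once, publishes a hazard pointer before dereferencing shared
 * memory (re-validating the pointer after publication, as the hazard
 * pointer protocol requires) and routes retired objects through this API:
 *
 *	ck_hp_t hp;
 *	ck_hp_record_t record;
 *	void *slots[1];
 *
 *	ck_hp_init(&hp, 1, 64, my_destroy);
 *	ck_hp_register(&hp, &record, slots);
 *
 *	ck_hp_set(&record, 0, node);
 *	...
 *	ck_hp_set(&record, 0, NULL);
 *	ck_hp_free(&record, &node->hazard, node, node);
 */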

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_hp.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
#include <ck_stdlib.h>
#include <ck_string.h>

CK_STACK_CONTAINER(struct ck_hp_record, global_entry, ck_hp_record_container)
CK_STACK_CONTAINER(struct ck_hp_hazard, pending_entry, ck_hp_hazard_container)

void
ck_hp_init(struct ck_hp *state,
	   unsigned int degree,
	   unsigned int threshold,
	   ck_hp_destructor_t destroy)
{

	state->threshold = threshold;
	state->degree = degree;
	state->destroy = destroy;
	state->n_subscribers = 0;
	state->n_free = 0;
	ck_stack_init(&state->subscribers);
	ck_pr_fence_store();

	return;
}

void
ck_hp_set_threshold(struct ck_hp *state, unsigned int threshold)
{

	ck_pr_store_uint(&state->threshold, threshold);
	return;
}

struct ck_hp_record *
ck_hp_recycle(struct ck_hp *global)
{
	struct ck_hp_record *record;
	ck_stack_entry_t *entry;
	int state;

	if (ck_pr_load_uint(&global->n_free) == 0)
		return NULL;

	CK_STACK_FOREACH(&global->subscribers, entry) {
		record = ck_hp_record_container(entry);

		if (ck_pr_load_int(&record->state) == CK_HP_FREE) {
			ck_pr_fence_load();
			state = ck_pr_fas_int(&record->state, CK_HP_USED);
			if (state == CK_HP_FREE) {
				ck_pr_dec_uint(&global->n_free);
				return record;
			}
		}
	}

	return NULL;
}

void
ck_hp_unregister(struct ck_hp_record *entry)
{

	entry->n_pending = 0;
	entry->n_peak = 0;
	entry->n_reclamations = 0;
	ck_stack_init(&entry->pending);
	ck_pr_fence_store();
	ck_pr_store_int(&entry->state, CK_HP_FREE);
	ck_pr_inc_uint(&entry->global->n_free);
	return;
}

void
ck_hp_register(struct ck_hp *state,
    struct ck_hp_record *entry,
    void **pointers)
{

	entry->state = CK_HP_USED;
	entry->global = state;
	entry->pointers = pointers;
	entry->n_pending = 0;
	entry->n_peak = 0;
	entry->n_reclamations = 0;
	memset(pointers, 0, state->degree * sizeof(void *));
	ck_stack_init(&entry->pending);
	ck_pr_fence_store();
	ck_stack_push_upmc(&state->subscribers, &entry->global_entry);
	ck_pr_inc_uint(&state->n_subscribers);
	return;
}

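/*
 * Subscription sketch (illustrative; my_hp, my_slots and the bare malloc()
 * are assumptions, not part of this file): a thread first attempts to
 * recycle a record released by a departed thread through
 * ck_hp_unregister() before registering a fresh one.
 *
 *	record = ck_hp_recycle(&my_hp);
 *	if (record == NULL) {
 *		record = malloc(sizeof *record);
 *		ck_hp_register(&my_hp, record, my_slots);
 *	}
 *
 * my_slots must provide at least `degree' slots of void *.
 */
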
static int
hazard_compare(const void *a, const void *b)
{
	void * const *x;
	void * const *y;

	x = a;
	y = b;
	return ((*x > *y) - (*x < *y));
}

CK_CC_INLINE static bool
ck_hp_member_scan(ck_stack_entry_t *entry, unsigned int degree, void *pointer)
{
	struct ck_hp_record *record;
	unsigned int i;
	void *hazard;

	do {
		record = ck_hp_record_container(entry);
		if (ck_pr_load_int(&record->state) == CK_HP_FREE)
			continue;

		if (ck_pr_load_ptr(&record->pointers) == NULL)
			continue;

		for (i = 0; i < degree; i++) {
			hazard = ck_pr_load_ptr(&record->pointers[i]);
			if (hazard == pointer)
				return (true);
		}
	} while ((entry = CK_STACK_NEXT(entry)) != NULL);

	return (false);
}

CK_CC_INLINE static void *
ck_hp_member_cache(struct ck_hp *global, void **cache, unsigned int *n_hazards)
{
	struct ck_hp_record *record;
	ck_stack_entry_t *entry;
	unsigned int hazards = 0;
	unsigned int i;
	void *pointer;

	CK_STACK_FOREACH(&global->subscribers, entry) {
		record = ck_hp_record_container(entry);
		if (ck_pr_load_int(&record->state) == CK_HP_FREE)
			continue;

		if (ck_pr_load_ptr(&record->pointers) == NULL)
			continue;

		for (i = 0; i < global->degree; i++) {
			/*
			 * The thread-local cache holds at most CK_HP_CACHE
			 * pointers. If it is exhausted, return the current
			 * record as a marker so that the caller falls back
			 * to a linear scan over this and every later
			 * subscriber.
			 */
			if (hazards >= CK_HP_CACHE) {
				*n_hazards = hazards;
				return (entry);
			}

			pointer = ck_pr_load_ptr(&record->pointers[i]);
			if (pointer != NULL)
				cache[hazards++] = pointer;
		}
	}

	*n_hazards = hazards;
	return (entry);
}

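/*
 * Scan the caller's pending stack and destroy every object that no
 * subscriber currently publishes as a hazard. Published hazards are
 * snapshotted into a sorted thread-local cache so that each pending
 * pointer can be resolved with a binary search; any subscribers that did
 * not fit in the cache (from the returned marker onward) are checked with
 * a linear scan instead.
 */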
void
ck_hp_reclaim(struct ck_hp_record *thread)
{
	struct ck_hp_hazard *hazard;
	struct ck_hp *global = thread->global;
	unsigned int n_hazards;
	void **cache, *marker, *match;
	ck_stack_entry_t *previous, *entry, *next;

	/* Store as many entries as possible in local array. */
	cache = thread->cache;
	marker = ck_hp_member_cache(global, cache, &n_hazards);

	/*
	 * Sorting the n cached hazards costs O(n log n) and each of the p
	 * pending pointers is then resolved with an O(log n) binary search,
	 * which beats the O(np) cost of a naive scan once n and p are large
	 * enough.
	 */
	qsort(cache, n_hazards, sizeof(void *), hazard_compare);

	previous = NULL;
	CK_STACK_FOREACH_SAFE(&thread->pending, entry, next) {
		hazard = ck_hp_hazard_container(entry);
		match = bsearch(&hazard->pointer, cache, n_hazards,
				  sizeof(void *), hazard_compare);
		if (match != NULL) {
			previous = entry;
			continue;
		}

		if (marker != NULL &&
		    ck_hp_member_scan(marker, global->degree, hazard->pointer)) {
			previous = entry;
			continue;
		}

		thread->n_pending -= 1;

		/* Remove from the pending stack. */
		if (previous)
			CK_STACK_NEXT(previous) = CK_STACK_NEXT(entry);
		else
			CK_STACK_FIRST(&thread->pending) = CK_STACK_NEXT(entry);

		/* The entry is now safe to destroy. */
		global->destroy(hazard->data);
		thread->n_reclamations++;
	}

	return;
}

void
ck_hp_retire(struct ck_hp_record *thread,
    struct ck_hp_hazard *hazard,
    void *data,
    void *pointer)
{

	ck_pr_store_ptr(&hazard->pointer, pointer);
	ck_pr_store_ptr(&hazard->data, data);
	ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);

	thread->n_pending += 1;
	if (thread->n_pending > thread->n_peak)
		thread->n_peak = thread->n_pending;

	return;
}

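/*
 * Identical to ck_hp_retire(), except that reaching the global threshold
 * of pending objects triggers an immediate reclamation cycle. Most callers
 * will want this entry point; ck_hp_retire() leaves the decision of when
 * to invoke ck_hp_reclaim() to the caller.
 */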
void
ck_hp_free(struct ck_hp_record *thread,
    struct ck_hp_hazard *hazard,
    void *data,
    void *pointer)
{
	struct ck_hp *global;

	global = ck_pr_load_ptr(&thread->global);
	ck_pr_store_ptr(&hazard->data, data);
	ck_pr_store_ptr(&hazard->pointer, pointer);
	ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);

	thread->n_pending += 1;
	if (thread->n_pending > thread->n_peak)
		thread->n_peak = thread->n_pending;

	if (thread->n_pending >= global->threshold)
		ck_hp_reclaim(thread);

	return;
}

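/*
 * Drain the caller's pending stack, backing off while other threads still
 * publish hazards against pending objects. On thread exit, a subscriber
 * would typically drain and then release its record so a future thread
 * can reuse it via ck_hp_recycle() (illustrative sequence):
 *
 *	ck_hp_purge(&record);
 *	ck_hp_unregister(&record);
 */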
void
ck_hp_purge(struct ck_hp_record *thread)
{
	ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

	while (thread->n_pending > 0) {
		ck_hp_reclaim(thread);
		if (thread->n_pending > 0)
			ck_backoff_eb(&backoff);
	}

	return;
}