/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * uu_list_index_ts define a location for insertion.  They are simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low bits of the index, to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
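
/*
 * Example (illustrative sketch only, not part of the original source):
 * a caller typically obtains a uu_list_index_t from uu_list_find() and
 * passes it straight to uu_list_insert().  The my_obj_t type below is a
 * hypothetical caller-side element type.
 *
 *	uu_list_index_t idx;
 *	my_obj_t *obj = ...;
 *
 *	if (uu_list_find(list, obj, NULL, &idx) == NULL)
 *		uu_list_insert(list, obj, idx);
 *
 * The mark stored in the low bits of idx is what INDEX_VALID() verifies
 * when the list was created with debugging enabled.
 */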

#define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))

static uu_list_pool_t	uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
static pthread_mutex_t	uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;

uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_list_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_LIST_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_list_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
	pp->ulp_nodeoffset = nodeoffset;
	pp->ulp_objsize = objsize;
	pp->ulp_cmp = compare_func;
	if (flags & UU_LIST_POOL_DEBUG)
		pp->ulp_debug = 1;
	pp->ulp_last_index = 0;

	(void) pthread_mutex_init(&pp->ulp_lock, NULL);

	pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
	pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list);

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next = next = &uu_null_lpool;
	pp->ulp_prev = prev = next->ulp_prev;
	next->ulp_prev = pp;
	prev->ulp_next = pp;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);

	return (pp);
}
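
/*
 * Example (illustrative sketch only, not part of the original source):
 * typical setup of a pool and a sorted list.  my_obj_t, mo_node, and
 * my_compare() are hypothetical caller-side names.
 *
 *	typedef struct my_obj {
 *		int		mo_key;
 *		uu_list_node_t	mo_node;
 *	} my_obj_t;
 *
 *	pool = uu_list_pool_create("my_pool", sizeof (my_obj_t),
 *	    offsetof(my_obj_t, mo_node), my_compare, UU_LIST_POOL_DEBUG);
 *	list = uu_list_create(pool, NULL, UU_LIST_SORTED);
 *
 *	uu_list_node_init(obj, &obj->mo_node, pool);
 */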

void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		if (pp->ulp_null_list.ul_next_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list) ||
		    pp->ulp_null_list.ul_prev_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list)) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}

void
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->ulp_objsize) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_objsize);
		}
		if (offset != pp->ulp_nodeoffset) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_nodeoffset);
		}
	}
	np->uln_next = POOL_TO_MARKER(pp);
	np->uln_prev = NULL;
}

void
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		if (np->uln_next == NULL &&
		    np->uln_prev == NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node already finied\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
		if (np->uln_next != POOL_TO_MARKER(pp) ||
		    np->uln_prev != NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt or on list\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
	}
	np->uln_next = NULL;
	np->uln_prev = NULL;
}

uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
	uu_list_t *lp, *next, *prev;

	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
		if (pp->ulp_debug)
			uu_panic("uu_list_create(%p, ...): requested "
			    "UU_LIST_SORTED, but pool has no comparison func\n",
			    (void *)pp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}

	lp = uu_zalloc(sizeof (*lp));
	if (lp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	lp->ul_pool = pp;
	lp->ul_parent_enc = UU_PTR_ENCODE(parent);
	lp->ul_offset = pp->ulp_nodeoffset;
	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
	lp->ul_sorted = (flags & UU_LIST_SORTED);
	lp->ul_numnodes = 0;
	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

	lp->ul_null_node.uln_next = &lp->ul_null_node;
	lp->ul_null_node.uln_prev = &lp->ul_null_node;

	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

	(void) pthread_mutex_lock(&pp->ulp_lock);
	next = &pp->ulp_null_list;
	prev = UU_PTR_DECODE(next->ul_prev_enc);
	lp->ul_next_enc = UU_PTR_ENCODE(next);
	lp->ul_prev_enc = UU_PTR_ENCODE(prev);
	next->ul_prev_enc = UU_PTR_ENCODE(lp);
	prev->ul_next_enc = UU_PTR_ENCODE(lp);
	(void) pthread_mutex_unlock(&pp->ulp_lock);

	return (lp);
}

void
uu_list_destroy(uu_list_t *lp)
{
	uu_list_pool_t *pp = lp->ul_pool;

	if (lp->ul_debug) {
		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
			uu_panic("uu_list_destroy(%p):  list not empty\n",
			    (void *)lp);
		}
		if (lp->ul_numnodes != 0) {
			uu_panic("uu_list_destroy(%p):  numnodes is nonzero, "
			    "but list is empty\n", (void *)lp);
		}
		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
			uu_panic("uu_list_destroy(%p):  outstanding walkers\n",
			    (void *)lp);
		}
	}

	(void) pthread_mutex_lock(&pp->ulp_lock);
	UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc;
	UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc;
	(void) pthread_mutex_unlock(&pp->ulp_lock);
	lp->ul_prev_enc = UU_PTR_ENCODE(NULL);
	lp->ul_next_enc = UU_PTR_ENCODE(NULL);
	lp->ul_pool = NULL;
	uu_free(lp);
}

static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
	if (lp->ul_debug) {
		if (next->uln_prev != prev || prev->uln_next != next)
			uu_panic("insert(%p): internal error: %p and %p not "
			    "neighbors\n", (void *)lp, (void *)next,
			    (void *)prev);

		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
		    np->uln_prev != NULL) {
			uu_panic("insert(%p): elem %p node %p corrupt, "
			    "not initialized, or already in a list.\n",
			    (void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
		}
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}
	np->uln_next = next;
	np->uln_prev = prev;
	next->uln_prev = np;
	prev->uln_next = np;

	lp->ul_numnodes++;
}

void
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
{
	uu_list_node_impl_t *np;

	np = INDEX_TO_NODE(idx);
	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
			    (void *)lp, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
			    "index\n", (void *)lp, elem, (void *)idx);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
}

void *
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
{
	int sorted = lp->ul_sorted;
	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
	uu_list_node_impl_t *np;

	if (func == NULL) {
		if (out != NULL)
			*out = 0;
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}
	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
	    np = np->uln_next) {
		void *ep = NODE_TO_ELEM(lp, np);
		int cmp = func(ep, elem, private);
		if (cmp == 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (ep);
		}
		if (sorted && cmp > 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (NULL);
		}
	}
	if (out != NULL)
		*out = NODE_TO_INDEX(lp, 0);
	return (NULL);
}

void *
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_next(%p, %p): %s\n",
			    (void *)lp, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if (np == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

void *
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
			    (void *)lp, (void *)idx, INDEX_CHECK(idx)?
			    "outdated index" : "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if ((np = np->uln_prev) == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->ulw_list = lp;
	wp->ulw_robust = robust;
	wp->ulw_dir = direction;
	if (direction > 0)
		wp->ulw_next_result = lp->ul_null_node.uln_next;
	else
		wp->ulw_next_result = lp->ul_null_node.uln_prev;

	if (lp->ul_debug || robust) {
		/*
		 * Add this walker to the list's list of walkers so
		 * uu_list_remove() can advance us if somebody tries to
		 * remove ulw_next_result.
		 */
		wp->ulw_next = next = &lp->ul_null_walk;
		wp->ulw_prev = prev = next->ulw_prev;
		next->ulw_prev = wp;
		prev->ulw_next = wp;
	}
}

static uu_list_node_impl_t *
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
{
	uu_list_node_impl_t *np = wp->ulw_next_result;
	uu_list_node_impl_t *next;

	if (np == &lp->ul_null_node)
		return (NULL);

	next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;

	wp->ulw_next_result = next;
	return (np);
}

static void
list_walk_fini(uu_list_walk_t *wp)
{
	/* GLXXX debugging? */
	if (wp->ulw_next != NULL) {
		wp->ulw_next->ulw_prev = wp->ulw_prev;
		wp->ulw_prev->ulw_next = wp->ulw_next;
		wp->ulw_next = NULL;
		wp->ulw_prev = NULL;
	}
	wp->ulw_list = NULL;
	wp->ulw_next_result = NULL;
}

uu_list_walk_t *
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	list_walk_init(wp, lp, flags);
	return (wp);
}

void *
uu_list_walk_next(uu_list_walk_t *wp)
{
	uu_list_t *lp = wp->ulw_list;
	uu_list_node_impl_t *np = list_walk_advance(wp, lp);

	if (np == NULL)
		return (NULL);

	return (NODE_TO_ELEM(lp, np));
}

void
uu_list_walk_end(uu_list_walk_t *wp)
{
	list_walk_fini(wp);
	uu_free(wp);
}
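
/*
 * Example (illustrative sketch only, not part of the original source):
 * UU_WALK_ROBUST allows elements to be removed while the walk is in
 * progress, because uu_list_remove() advances any robust walker whose
 * next result would be the removed node.  my_obj_t and should_delete()
 * are hypothetical caller-side names.
 *
 *	uu_list_walk_t *walk;
 *	my_obj_t *obj;
 *
 *	walk = uu_list_walk_start(list, UU_WALK_ROBUST);
 *	while ((obj = uu_list_walk_next(walk)) != NULL) {
 *		if (should_delete(obj))
 *			uu_list_remove(list, obj);
 *	}
 *	uu_list_walk_end(walk);
 */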

int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	uu_list_node_impl_t *np;

	int status = UU_WALK_NEXT;

	int robust = (flags & UU_WALK_ROBUST);
	int reverse = (flags & UU_WALK_REVERSE);

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	if (lp->ul_debug || robust) {
		uu_list_walk_t my_walk;
		void *e;

		list_walk_init(&my_walk, lp, flags);
		while (status == UU_WALK_NEXT &&
		    (e = uu_list_walk_next(&my_walk)) != NULL)
			status = (*func)(e, private);
		list_walk_fini(&my_walk);
	} else {
		if (!reverse) {
			for (np = lp->ul_null_node.uln_next;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_next) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		} else {
			for (np = lp->ul_null_node.uln_prev;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_prev) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		}
	}
	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
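
/*
 * Example (illustrative sketch only, not part of the original source):
 * callback-style traversal with uu_list_walk().  print_one() is a
 * hypothetical caller-side callback; returning anything other than
 * UU_WALK_NEXT stops the walk, and a negative return value makes
 * uu_list_walk() fail with UU_ERROR_CALLBACK_FAILED.
 *
 *	static int
 *	print_one(void *elem, void *private)
 *	{
 *		(void) printf("%d\n", ((my_obj_t *)elem)->mo_key);
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	if (uu_list_walk(list, print_one, NULL, 0) != 0)
 *		... handle the error reported via uu_error() ...
 */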

void
uu_list_remove(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
	uu_list_walk_t *wp;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
			    (void *)lp, elem);
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}

	/*
	 * robust walkers must be advanced.  In debug mode, non-robust
	 * walkers are also on the list.  If there are any, it's an error.
	 */
	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
	    wp = wp->ulw_next) {
		if (wp->ulw_robust) {
			if (np == wp->ulw_next_result)
				(void) list_walk_advance(wp, lp);
		} else if (wp->ulw_next_result != NULL) {
			uu_panic("uu_list_remove(%p, %p): active non-robust "
			    "walker\n", (void *)lp, elem);
		}
	}

	np->uln_next->uln_prev = np->uln_prev;
	np->uln_prev->uln_next = np->uln_next;

	lp->ul_numnodes--;

	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
	np->uln_prev = NULL;
}

void *
uu_list_teardown(uu_list_t *lp, void **cookie)
{
	void *ep;

	/*
	 * XXX: disable list modification until list is empty
	 */
	if (lp->ul_debug && *cookie != NULL)
		uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
		    (void *)lp, (void *)cookie);

	ep = uu_list_first(lp);
	if (ep)
		uu_list_remove(lp, ep);
	return (ep);
}
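
/*
 * Example (illustrative sketch only, not part of the original source):
 * the usual teardown idiom, which empties the list one element at a
 * time.  The cookie is opaque to the caller and must start out NULL.
 * my_obj_t and mo_node are hypothetical caller-side names.
 *
 *	void *cookie = NULL;
 *	my_obj_t *obj;
 *
 *	while ((obj = uu_list_teardown(list, &cookie)) != NULL) {
 *		uu_list_node_fini(obj, &obj->mo_node, pool);
 *		free(obj);
 *	}
 *	uu_list_destroy(list);
 */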

int
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_before(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
	return (0);
}

int
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_after(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
	return (0);
}

size_t
uu_list_numnodes(uu_list_t *lp)
{
	return (lp->ul_numnodes);
}

void *
uu_list_first(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_last(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_next(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_prev(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}
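
/*
 * Example (illustrative sketch only, not part of the original source):
 * manual forward iteration with the accessors above.  This form is not
 * safe against removal during the loop; use a robust walker for that.
 * my_obj_t and mo_key are hypothetical caller-side names.
 *
 *	my_obj_t *obj;
 *
 *	for (obj = uu_list_first(list); obj != NULL;
 *	    obj = uu_list_next(list, obj))
 *		(void) printf("%d\n", obj->mo_key);
 */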

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_list_lockup(void)
{
	uu_list_pool_t *pp;

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_lock(&pp->ulp_lock);
}

void
uu_list_release(void)
{
	uu_list_pool_t *pp;

	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_unlock(&pp->ulp_lock);
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
}