/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)merge.c	8.2 (Berkeley) 2/14/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/lib/libc/stdlib/merge.c 92905 2002-03-21 22:49:10Z obrien $");

/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define NATURAL
#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */
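
/*
 * Observation (not in the original comments): with NATURAL defined,
 * setup() below links already-sorted input into a single run, so the
 * merge loop in mergesort() never executes and presorted data costs
 * only the O(n) first pass.
 */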

#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static void setup(u_char *, u_char *, size_t, size_t, int (*)());
static void insertionsort(u_char *, size_t, size_t, int (*)());

#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)
#define ICOPY_LIST(src, dst, last)				\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while(src < last)
#define ICOPY_ELT(src, dst, i)					\
	do							\
	*(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE;	\
	while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)		\
	do					\
		*dst++ = *src++;		\
	while (src < last)
#define CCOPY_ELT(src, dst, i)			\
	do					\
		*dst++ = *src++;		\
	while (i -= 1)
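
/*
 * The ICOPY_* macros move records an int at a time; mergesort() uses
 * them when the element size is a multiple of sizeof(int) and the base
 * address is int-aligned (the iflag test below).  The CCOPY_* macros
 * are the byte-at-a-time fallback.
 */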

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (u_char **)						\
	((u_char *)0 +							\
	    (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
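
/*
 * Worked example: with PSIZE == 8 and an 8-byte-aligned base,
 * EVAL(base + 13) yields base + 16, the next pointer-aligned address
 * at or above the argument, so a link pointer can always be stored
 * there.
 */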

/*
 * Arguments are as for qsort.
 */
int
mergesort(base, nmemb, size, cmp)
	void *base;
	size_t nmemb;
	size_t size;
	int (*cmp)(const void *, const void *);
{
	int i, sense;
	int big, iflag;
	u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
	u_char *list2, *list1, *p2, *p, *last, **p1;

	if (size < PSIZE / 2) {		/* Pointers must fit into 2 * size. */
		errno = EINVAL;
		return (-1);
	}

	if (nmemb == 0)
		return (0);

	/*
	 * XXX
	 * Stupid subtraction for the Cray.
	 */
	iflag = 0;
	if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
		iflag = 1;

	if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
		return (-1);
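	/*
	 * Note: the PSIZE of slack in the allocation above leaves room
	 * for the run links, since EVAL() may round a link slot up past
	 * the end of the last record.
	 */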

	list1 = base;
	setup(list1, list2, nmemb, size, cmp);
	last = list2 + nmemb * size;
	i = big = 0;
	while (*EVAL(list2) != last) {
	    l2 = list1;
	    p1 = EVAL(list1);
	    for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
	    	p2 = *EVAL(p2);
	    	f1 = l2;
	    	f2 = l1 = list1 + (p2 - list2);
	    	if (p2 != last)
	    		p2 = *EVAL(p2);
	    	l2 = list1 + (p2 - list2);
	    	while (f1 < l1 && f2 < l2) {
	    		if ((*cmp)(f1, f2) <= 0) {
	    			q = f2;
	    			b = f1, t = l1;
	    			sense = -1;
	    		} else {
	    			q = f1;
	    			b = f2, t = l2;
	    			sense = 0;
	    		}
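	    		/*
	    		 * Search strategy: scan linearly at first; once q
	    		 * wins six comparisons in a row, assume long runs
	    		 * and switch to exponential (galloping) search for
	    		 * the rest of the pass.
	    		 */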
	    		if (!big) {	/* here i = 0 */
				while ((b += size) < t && cmp(q, b) > sense)
	    				if (++i == 6) {
	    					big = 1;
	    					goto EXPONENTIAL;
	    				}
	    		} else {
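				/*
				 * Exponential search: double the probe
				 * offset i until the comparison flips or
				 * the run end t is passed, then
				 * binary-search the bracketed interval; a
				 * long winning streak costs O(log n)
				 * comparisons rather than O(n).
				 */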
EXPONENTIAL:	    		for (i = size; ; i <<= 1)
	    				if ((p = (b + i)) >= t) {
	    					if ((p = t - size) > b &&
						    (*cmp)(q, p) <= sense)
	    						t = p;
	    					else
	    						b = p;
	    					break;
	    				} else if ((*cmp)(q, p) <= sense) {
	    					t = p;
	    					if (i == size)
	    						big = 0;
	    					goto FASTCASE;
	    				} else
	    					b = p;
				while (t > b+size) {
	    				i = (((t - b) / size) >> 1) * size;
	    				if ((*cmp)(q, p = b + i) <= sense)
	    					t = p;
	    				else
	    					b = p;
	    			}
	    			goto COPY;
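	    		/*
	    		 * FASTCASE: the flip was bracketed between b and
	    		 * b + i by the doubling probes above; finish with
	    		 * a plain binary search.
	    		 */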
FASTCASE:	    		while (i > size)
	    				if ((*cmp)(q,
	    					p = b + (i >>= 1)) <= sense)
	    					t = p;
	    				else
	    					b = p;
COPY:	    			b = t;
	    		}
	    		i = size;
	    		if (q == f1) {
	    			if (iflag) {
	    				ICOPY_LIST(f2, tp2, b);
	    				ICOPY_ELT(f1, tp2, i);
	    			} else {
	    				CCOPY_LIST(f2, tp2, b);
	    				CCOPY_ELT(f1, tp2, i);
	    			}
	    		} else {
	    			if (iflag) {
	    				ICOPY_LIST(f1, tp2, b);
	    				ICOPY_ELT(f2, tp2, i);
	    			} else {
	    				CCOPY_LIST(f1, tp2, b);
	    				CCOPY_ELT(f2, tp2, i);
	    			}
	    		}
	    	}
	    	if (f2 < l2) {
	    		if (iflag)
	    			ICOPY_LIST(f2, tp2, l2);
	    		else
	    			CCOPY_LIST(f2, tp2, l2);
	    	} else if (f1 < l1) {
	    		if (iflag)
	    			ICOPY_LIST(f1, tp2, l1);
	    		else
	    			CCOPY_LIST(f1, tp2, l1);
	    	}
	    	*p1 = l2;
	    }
	    tp2 = list1;	/* swap list1, list2 */
	    list1 = list2;
	    list2 = tp2;
	    last = list2 + nmemb*size;
	}
	if (base == list2) {
		memmove(list2, list1, nmemb*size);
		list2 = list1;
	}
	free(list2);
	return (0);
}

#define	swap(a, b) {					\
		s = b;					\
		i = size;				\
		do {					\
			tmp = *a; *a++ = *s; *s++ = tmp; \
		} while (--i);				\
		a -= size;				\
	}
#define reverse(bot, top) {				\
	s = top;					\
	do {						\
		i = size;				\
		do {					\
			tmp = *bot; *bot++ = *s; *s++ = tmp; \
		} while (--i);				\
		s -= size2;				\
	} while(bot < s);				\
}
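
/*
 * Both macros exchange bytes one at a time and rely on the locals s,
 * i, tmp, size (and, for reverse(), size2) of the enclosing function.
 * reverse(bot, top) flips the descending run [bot, top] in place so
 * the first pass can treat it as ascending.
 */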

/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
static void
setup(list1, list2, n, size, cmp)
	size_t n, size;
	int (*cmp)(const void *, const void *);
	u_char *list1, *list2;
{
	int i, length, size2, tmp, sense;
	u_char *f1, *f2, *s, *l2, *last, *p2;

	size2 = size*2;
	if (n <= 5) {
		insertionsort(list1, n, size, cmp);
		*EVAL(list2) = (u_char*) list2 + n*size;
		return;
	}
	/*
	 * Avoid running pointers out of bounds; limit n to evens
	 * for simplicity.
	 */
	i = 4 + (n & 1);
	insertionsort(list1 + (n - i) * size, i, size, cmp);
	last = list1 + size * (n - i);
	*EVAL(list2 + (last - list1)) = list2 + n * size;

#ifdef NATURAL
	p2 = list2;
	f1 = list1;
	sense = (cmp(f1, f1 + size) > 0);
	for (; f1 < last; sense = !sense) {
		length = 2;
					/* Find pairs with same sense. */
		for (f2 = f1 + size2; f2 < last; f2 += size2) {
			if ((cmp(f2, f2 + size) > 0) != sense)
				break;
			length += 2;
		}
		if (length < THRESHOLD) {		/* Pairwise merge */
			do {
				p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
				if (sense > 0)
					swap (f1, f1 + size);
			} while ((f1 += size2) < f2);
		} else {				/* Natural merge */
			l2 = f2;
			for (f2 = f1 + size2; f2 < l2; f2 += size2) {
				if ((cmp(f2-size, f2) > 0) != sense) {
					p2 = *EVAL(p2) = f2 - list1 + list2;
					if (sense > 0)
						reverse(f1, f2-size);
					f1 = f2;
				}
			}
			if (sense > 0)
				reverse (f1, f2-size);
			f1 = f2;
			if (f2 < last || cmp(f2 - size, f2) > 0)
				p2 = *EVAL(p2) = f2 - list1 + list2;
			else
				p2 = *EVAL(p2) = list2 + n*size;
		}
	}
#else		/* pairwise merge only. */
	for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
		p2 = *EVAL(p2) = p2 + size2;
		if (cmp (f1, f1 + size) > 0)
			swap(f1, f1 + size);
	}
#endif /* NATURAL */
}

/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
static void
insertionsort(a, n, size, cmp)
	u_char *a;
	size_t n, size;
	int (*cmp)(const void *, const void *);
{
	u_char *ai, *s, *t, *u, tmp;
	int i;

	for (ai = a+size; --n >= 1; ai += size)
		for (t = ai; t > a; t -= size) {
			u = t - size;
			if (cmp(u, t) <= 0)
				break;
			swap(u, t);
		}
}
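
/*
 * Minimal usage sketch (illustrative only; the MERGESORT_EXAMPLE guard
 * is an assumption, not a real build knob).  mergesort() takes the same
 * arguments as qsort(), is stable, and returns -1 with errno set when
 * size is too small for the link trick or when malloc() fails.
 */
#ifdef MERGESORT_EXAMPLE
#include <stdio.h>

static int
cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	/* Return <0, 0, >0 as qsort(3) expects. */
	return ((x < y) ? -1 : (x > y));
}

int
main(void)
{
	int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };
	size_t i, n = sizeof(v) / sizeof(v[0]);

	/* sizeof(int) satisfies the size >= PSIZE / 2 requirement on
	 * common 32- and 64-bit machines. */
	if (mergesort(v, n, sizeof(v[0]), cmp_int) != 0) {
		perror("mergesort");
		return (1);
	}
	for (i = 0; i < n; i++)
		printf("%d ", v[i]);
	printf("\n");
	return (0);
}
#endif /* MERGESORT_EXAMPLE */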
353101099Srwatson