11573Srgrimes/*-
21573Srgrimes * Copyright (c) 1991, 1993
31573Srgrimes *	The Regents of the University of California.  All rights reserved.
4264042Stheraven * Copyright (c) 2014 David T. Chisnall
5264042Stheraven * All rights reserved.
61573Srgrimes *
71573Srgrimes * This code is derived from software contributed to Berkeley by
81573Srgrimes * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
91573Srgrimes *
101573Srgrimes * Redistribution and use in source and binary forms, with or without
111573Srgrimes * modification, are permitted provided that the following conditions
121573Srgrimes * are met:
131573Srgrimes * 1. Redistributions of source code must retain the above copyright
141573Srgrimes *    notice, this list of conditions and the following disclaimer.
151573Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
161573Srgrimes *    notice, this list of conditions and the following disclaimer in the
171573Srgrimes *    documentation and/or other materials provided with the distribution.
18251069Semaste * 3. Neither the name of the University nor the names of its contributors
191573Srgrimes *    may be used to endorse or promote products derived from this software
201573Srgrimes *    without specific prior written permission.
211573Srgrimes *
221573Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
231573Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
241573Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
251573Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
261573Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
271573Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
281573Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
291573Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
301573Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
311573Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
321573Srgrimes * SUCH DAMAGE.
331573Srgrimes */
341573Srgrimes
351573Srgrimes#if defined(LIBC_SCCS) && !defined(lint)
361573Srgrimesstatic char sccsid[] = "@(#)heapsort.c	8.1 (Berkeley) 6/4/93";
371573Srgrimes#endif /* LIBC_SCCS and not lint */
3892889Sobrien#include <sys/cdefs.h>
3992889Sobrien__FBSDID("$FreeBSD$");
401573Srgrimes
411573Srgrimes#include <errno.h>
4215312Sbde#include <stddef.h>
431573Srgrimes#include <stdlib.h>
441573Srgrimes
/*
 * COMPAR() hides the difference between the two build flavors: when
 * compiled as heapsort_b() the comparator is invoked through the
 * block ABI (see block_abi.h); otherwise it is a plain function
 * pointer call.
 */
#ifdef I_AM_HEAPSORT_B
#include "block_abi.h"
#define COMPAR(x, y) CALL_BLOCK(compar, x, y)
/* Comparator type for the block flavor of the interface. */
typedef DECLARE_BLOCK(int, heapsort_block, const void *, const void *);
#else
#define COMPAR(x, y) compar(x, y)
#endif
52264042Stheraven
531573Srgrimes/*
541573Srgrimes * Swap two areas of size number of bytes.  Although qsort(3) permits random
551573Srgrimes * blocks of memory to be sorted, sorting pointers is almost certainly the
561573Srgrimes * common case (and, were it not, could easily be made so).  Regardless, it
571573Srgrimes * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
581573Srgrimes * arithmetic gets lost in the time required for comparison function calls.
591573Srgrimes */
/*
 * NOTE: SWAP advances both pointers past the swapped region as a side
 * effect; callers here (CREATE) recompute par/child each iteration, so
 * the clobber is harmless.
 */
#define	SWAP(a, b, count, size, tmp) { \
	count = size; \
	do { \
		tmp = *a; \
		*a++ = *b; \
		*b++ = tmp; \
	} while (--count); \
}
681573Srgrimes
691573Srgrimes/* Copy one block of size size to another. */
/*
 * Copy one block of size size to another (b into a).  Unlike SWAP, the
 * walking is done through tmp1/tmp2, so a and b themselves are left
 * unmodified.
 */
#define COPY(a, b, count, size, tmp1, tmp2) { \
	count = size; \
	tmp1 = a; \
	tmp2 = b; \
	do { \
		*tmp1++ = *tmp2++; \
	} while (--count); \
}
781573Srgrimes
791573Srgrimes/*
801573Srgrimes * Build the list into a heap, where a heap is defined such that for
811573Srgrimes * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
821573Srgrimes *
 * There are two cases.  If j == nmemb, select largest of Ki and Kj.  If
841573Srgrimes * j < nmemb, select largest of Ki, Kj and Kj+1.
851573Srgrimes */
/*
 * Sift the element at 1-based heap index "initval" down: at each level
 * pick the larger of the two children, swap it with the parent if it
 * outranks the parent, and stop at the first parent already in order.
 * Relies on "base" (in the enclosing function) pointing one element
 * below the array so index arithmetic is 1-based.
 */
#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
	for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
	    par_i = child_i) { \
		child = base + child_i * size; \
		if (child_i < nmemb && COMPAR(child, child + size) < 0) { \
			child += size; \
			++child_i; \
		} \
		par = base + par_i * size; \
		if (COMPAR(child, par) <= 0) \
			break; \
		SWAP(par, child, count, size, tmp); \
	} \
}
1001573Srgrimes
1011573Srgrimes/*
1021573Srgrimes * Select the top of the heap and 'heapify'.  Since by far the most expensive
1031573Srgrimes * action is the call to the compar function, a considerable optimization
1041573Srgrimes * in the average case can be achieved due to the fact that k, the displaced
 * element, is usually quite small, so it would be preferable to first
1061573Srgrimes * heapify, always maintaining the invariant that the larger child is copied
1071573Srgrimes * over its parent's record.
1081573Srgrimes *
1091573Srgrimes * Then, starting from the *bottom* of the heap, finding k's correct place,
 * again maintaining the invariant.  As a result of the invariant no element
1111573Srgrimes * is 'lost' when k is assigned its correct place in the heap.
1121573Srgrimes *
1131573Srgrimes * The time savings from this optimization are on the order of 15-20% for the
1141573Srgrimes * average case. See Knuth, Vol. 3, page 158, problem 18.
1151573Srgrimes *
1161573Srgrimes * XXX Don't break the #define SELECT line, below.  Reiser cpp gets upset.
1171573Srgrimes */
/*
 * First loop: descend from the root to a leaf, promoting the larger
 * child into its parent's slot at every level (no comparisons against
 * k yet).  Second loop: climb back up from that leaf, pulling each
 * too-small ancestor down one level, until k's proper slot is found
 * (or the root, child_i == 1, is reached) and k is copied into it.
 */
#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
	for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
		child = base + child_i * size; \
		if (child_i < nmemb && COMPAR(child, child + size) < 0) { \
			child += size; \
			++child_i; \
		} \
		par = base + par_i * size; \
		COPY(par, child, count, size, tmp1, tmp2); \
	} \
	for (;;) { \
		child_i = par_i; \
		par_i = child_i / 2; \
		child = base + child_i * size; \
		par = base + par_i * size; \
		if (child_i == 1 || COMPAR(k, par) < 0) { \
			COPY(child, k, count, size, tmp1, tmp2); \
			break; \
		} \
		COPY(child, par, count, size, tmp1, tmp2); \
	} \
}
1401573Srgrimes
/* Forward declaration of the flavor of the function defined below. */
#ifdef I_AM_HEAPSORT_B
int heapsort_b(void *, size_t, size_t, heapsort_block);
#else
int heapsort(void *, size_t, size_t,
    int (*)(const void *, const void *));
#endif
1471573Srgrimes/*
1481573Srgrimes * Heapsort -- Knuth, Vol. 3, page 145.  Runs in O (N lg N), both average
1491573Srgrimes * and worst.  While heapsort is faster than the worst case of quicksort,
1501573Srgrimes * the BSD quicksort does median selection so that the chance of finding
1511573Srgrimes * a data set that will trigger the worst case is nonexistent.  Heapsort's
1521573Srgrimes * only advantage over quicksort is that it requires little additional memory.
1531573Srgrimes */
#ifdef I_AM_HEAPSORT_B
int
heapsort_b(void *vbase, size_t nmemb, size_t size, heapsort_block compar)
#else
int
heapsort(void *vbase, size_t nmemb, size_t size,
    int (*compar)(const void *, const void *))
#endif
{
	size_t bytes, par_i, child_i, start;
	char onebyte, *dst, *src;
	char *base, *saved, *childp, *parp;

	/* Zero or one element is already sorted. */
	if (nmemb <= 1)
		return (0);

	/* Zero-sized elements make the index arithmetic meaningless. */
	if (size == 0) {
		errno = EINVAL;
		return (-1);
	}

	/* One-element scratch slot for the value displaced from the top. */
	saved = malloc(size);
	if (saved == NULL)
		return (-1);

	/*
	 * The heap is 1-indexed, so aim "base" one element below the
	 * caller's array; element i then lives at base + i * size.
	 */
	base = (char *)vbase - size;

	/* Heapify: sift down every interior node, deepest parent first. */
	for (start = nmemb / 2; start >= 1; start--) {
		CREATE(start, nmemb, par_i, child_i, parp, childp, size,
		    bytes, onebyte);
	}

	/*
	 * Pull the maximum off the heap into its final slot at the tail,
	 * then restore the (one smaller) heap around the element that
	 * was displaced from that tail slot.
	 */
	while (nmemb > 1) {
		COPY(saved, base + nmemb * size, bytes, size, dst, src);
		COPY(base + nmemb * size, base + size, bytes, size, dst, src);
		--nmemb;
		SELECT(par_i, child_i, nmemb, parp, childp, size, saved,
		    bytes, dst, src);
	}
	free(saved);
	return (0);
}
201