/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>
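
/*
 * Illustrative usage (a hedged sketch, not code that exists elsewhere in
 * the tree): a user of this API typically creates a heap descriptor,
 * attaches the special-purpose region it manages, carves offsets out of
 * it, and frees them again.  The alignment, region size, allocation size
 * and owner string below are made up for the example; only the function
 * names are real.
 *
 *	rh_info_t *rh;
 *	unsigned long offset;
 *
 *	rh = rh_create(4);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *	rh_attach_region(rh, 0, 0x4000);
 *	offset = rh_alloc(rh, 128, "example-owner");
 *	if (IS_ERR_VALUE(offset))
 *		return (int)offset;
 *
 *	rh_free(rh, offset);
 *	rh_destroy(rh);
 */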

/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new slots to the empty list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Ensure at least the required number of empty slots.  If this function
 * causes a grow in the block area then all pointers kept to the block
 * area are invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round up to the next multiple of 16 blocks */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}

static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* No empty slots left and the block area could not be extended */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = 0;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Insert before the first block that starts after the given one,
	 * keeping the taken list sorted by start address */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically.  Note that no memory for the blocks
 * is allocated; it will be allocated upon the first attach or allocation.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}

/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static.
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all block slots to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
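
/*
 * Illustrative early-boot usage (a hedged sketch; the symbol names and
 * sizes below are made up for the example and do not exist elsewhere):
 * when kmalloc is not yet available, the caller provides static storage
 * for both the info block and the block slots and hands them to rh_init().
 *
 *	static rh_info_t example_rh;
 *	static rh_block_t example_rh_blocks[16];
 *
 *	rh_init(&example_rh, 4, ARRAY_SIZE(example_rh_blocks),
 *		example_rh_blocks);
 *	rh_attach_region(&example_rh, 0, 0x2000);
 */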

/* Attach a free memory region, coalescing it with adjacent free regions */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (IS_ERR_VALUE(e) || (e < s))
		return -ERANGE;

	/* Take final values */
	start = s;
	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}

/* Detach the given address range, splitting a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}

/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	attach_taken_block(info, newblk);

	return start;
}

/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.
 */
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}

/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		/* Not this block; reset so a failed search is caught below */
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}

/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Walk the list, copying up to max_stats entries */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}

int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}