/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

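/*
 * Scan a bitmap of little-endian, BITS_PER_LONG-sized words for the next set
 * bit at or after bit 'offset'.  Returns the bit number of the first one bit
 * found, or 'size' if the range contains no set bits.  The uintBPL/leBPL
 * macros above paste together the 32- or 64-bit byte-swapping helper that
 * matches the host word size.
 */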
static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset)
	{
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1))
	{
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)

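/*
 * Read one block of the space bitmap from disk and cache the buffer_head in
 * bitmap->s_block_bitmap[bitmap_nr].  Note that a NULL buffer_head is stored
 * on I/O failure; callers must check the slot (or the -EIO return) before
 * dereferencing it.
 */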
static int read_block_bitmap(struct super_block *sb,
	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
	{
		retval = -EIO;
	}
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

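/*
 * Make sure the bitmap block for 'block_group' is in memory, reading it in
 * on first use.  Returns the slot index (currently always equal to
 * block_group) or a negative error code.
 */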
static int __load_block_bitmap(struct super_block *sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
	{
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group, nr_groups);
		return -EIO;
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;
	else
	{
		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

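/*
 * Wrapper around __load_block_bitmap() that also verifies the cached
 * buffer_head is non-NULL, turning an earlier failed read into -EIO.
 */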
static inline int load_block_bitmap(struct super_block *sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

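/*
 * Free 'count' blocks starting at bloc+offset by setting the corresponding
 * bits in the space bitmap.  The bit position is biased by the size of the
 * on-disk spaceBitmapDesc header that precedes the bitmap proper, and the
 * do_more loop handles runs that cross a bitmap-block boundary.  Freed
 * blocks are credited back to the quota and to the free space count in the
 * logical volume integrity descriptor (LVID).
 */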
static void udf_bitmap_free_blocks(struct super_block *sb,
	struct inode *inode,
	struct udf_bitmap *bitmap,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3))
	{
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++)
	{
		if (udf_set_bit(bit + i, bh->b_data))
		{
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		}
		else
		{
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb))
			{
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow)
	{
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

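/*
 * Try to grab up to 'block_count' contiguous free blocks starting exactly at
 * 'first_block', clearing their bits in the bitmap.  The walk stops at the
 * first bit that is already in use (or on quota failure), so the return
 * value may be anywhere from 0 to block_count blocks actually reserved.
 */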
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
	struct inode *inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0)
	{
		if (!udf_test_bit(bit, bh->b_data))
			goto out;
		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
			goto out;
		else if (!udf_clear_bit(bit, bh->b_data))
		{
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

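/*
 * Allocate a single block, preferring 'goal'.  If the goal bit is not free,
 * the search widens: first within the 64-bit word containing the goal, then
 * byte-wise (memscan for a byte != 0xFF) and bit-wise through the rest of
 * the group, and finally round-robin through all groups.  The group loop
 * runs nr_groups*2 times: the first pass uses the fast byte-level scan, the
 * second falls back to a bit-level search.  search_back then slides up to 7
 * bits backwards so allocations tend to start at the beginning of a free run.
 */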
static int udf_bitmap_new_block(struct super_block *sb,
	struct inode *inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
	{
		bit = block % (sb->s_blocksize << 3);

		if (udf_test_bit(bit, bh->b_data))
		{
			goto got_block;
		}
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto search_back;
		}
		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++)
	{
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups)
		{
			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
			{
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		}
		else
		{
			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2))
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
	if (bit >= sb->s_blocksize << 3)
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/* Walk back over up to 7 adjacent free bits so the allocation starts
	   nearer the beginning of a free run. */
	for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		;

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data))
	{
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}

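/*
 * Table-based variant of block freeing: the free extents live in an
 * unallocated-space table (an inode), so freeing means either merging the
 * run [start, end] into an adjacent free extent or, if no neighbour is
 * found, appending a brand-new extent.  The long comment below explains why
 * the append path cannot simply call udf_add_aext().
 */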
static void udf_table_free_blocks(struct super_block *sb,
	struct inode *inode,
	struct inode *table,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that
	   could occur, but... oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

	while (count && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
			start))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}
		else if (eloc.logicalBlockNum == (end + 1))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh)
		{
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		}
		else
			oepos.offset = epos.offset;
	}

	if (count)
	{
		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
				 a new block, and since we already hold the allocation mutex
				 very bad things would happen :)

				 We copy the behavior of udf_add_aext, but instead of
				 trying to allocate a new block close to the existing one,
				 we just steal a block from the extent we are trying to add.

				 It would be nice if the blocks were close together, but it
				 isn't required.
		*/

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else
		{
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize)
		{
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			if (!(epos.bh = udf_tread(sb,
				udf_get_lb_pblock(sb, epos.block, 0))))
			{
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize)
			{
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(inode) + epos.offset -
					udf_file_entry_alloc_offset(inode) +
					UDF_I_LENEATTR(inode) - adsize;
				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) + adsize;
			}
			else
			{
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh)
				{
					aed = (struct allocExtDesc *)oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				}
				else
				{
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
					epos.block.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
					epos.block.logicalBlockNum, sizeof(tag));
			switch (UDF_I_ALLOCTYPE(table))
			{
				case ICBTAG_FLAG_AD_SHORT:
				{
					sad = (short_ad *)sptr;
					sad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
					break;
				}
				case ICBTAG_FLAG_AD_LONG:
				{
					lad = (long_ad *)sptr;
					lad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					lad->extLocation = cpu_to_lelb(epos.block);
					break;
				}
			}
			if (oepos.bh)
			{
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			}
			else
				mark_inode_dirty(table);
		}

		if (elen) /* It's possible that stealing the block emptied the extent */
		{
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh)
			{
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			}
			else
			{
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

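/*
 * Preallocate from the free-space table: walk the extents until one starts
 * exactly at 'first_block', then carve up to 'block_count' blocks off its
 * front, shrinking the extent in place or deleting it if fully consumed.
 */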
static int udf_table_prealloc_blocks(struct super_block *sb,
	struct inode *inode,
	struct inode *table, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			eloc.logicalBlockNum, elen, first_block);
		/* the loop body only produces debug output */
	}

	if (first_block == eloc.logicalBlockNum)
	{
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
			alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count)
		{
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
		}
		else
			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
	}
	else
		alloc_count = 0;

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

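/*
 * Single-block allocation from the free-space table.  The loop below scans
 * every extent and remembers the one whose distance ('spread') from 'goal'
 * is smallest; an exact hit gives spread == 0, which terminates the scan.
 * One block is then taken from the front of that extent.
 */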
static int udf_table_new_block(struct super_block *sb,
	struct inode *inode,
	struct inode *table, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an
	   exact hit, we stop. Otherwise we keep going until we run out of
	   extents. We store the buffer_head, block, and extent offset of the
	   current closest match and use that when we are done.
	*/
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		if (goal >= eloc.logicalBlockNum)
		{
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		}
		else
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread)
		{
			spread = nspread;
			if (goal_epos.bh != epos.bh)
			{
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF)
	{
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

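/*
 * Public entry points: each one dispatches on the partition flags to the
 * bitmap- or table-based implementation, using the unallocated-space or
 * freed-space structure as appropriate.
 */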
inline void udf_free_blocks(struct super_block *sb,
	struct inode *inode,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			bloc, offset, count);
	}
	else
		return;
}

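/*
 * As above: route the preallocation request to the bitmap or table handler
 * for this partition, returning the number of blocks actually reserved.
 */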
inline int udf_prealloc_blocks(struct super_block *sb,
	struct inode *inode,
	uint16_t partition, uint32_t first_block, uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, first_block, block_count);
	}
	else
		return 0;
}

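/*
 * Allocate one block near 'goal' on the given partition, again dispatching
 * on the partition flags.  Returns the new block number, or 0 with *err set
 * if no allocation mechanism is configured or allocation fails.
 */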
inline int udf_new_block(struct super_block *sb,
	struct inode *inode,
	uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		ret = udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, goal, err);
		return ret;
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, goal, err);
	}
	else
	{
		*err = -EIO;
		return 0;
	}
}