// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

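/*
 * Each fork stores its first eight extents inline in the catalog
 * record; anything beyond that lives in the extents overflow tree,
 * keyed by (cnid, fork type, start block).  A sketch of the on-disk
 * extent descriptor these helpers walk, assuming the layout in
 * hfsplus_raw.h:
 *
 *	struct hfsplus_extent {
 *		__be32 start_block;	// first allocation block of the run
 *		__be32 block_count;	// length of the run in blocks
 *	};
 *	typedef struct hfsplus_extent hfsplus_extent_rec[8];
 */
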
/* Compare two extent keys; returns 0 if equal, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

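/*
 * Fill in a search key for the extents tree.  The key length stored
 * on disk excludes the key_len field itself, hence the "- 2".
 */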
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

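/*
 * Map a block offset within this extent record to an allocation
 * block number on disk; returns 0 if the offset lies past the record.
 */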
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

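/* Sum the block counts of all eight extents in a record. */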
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

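/*
 * Return the allocation block just past the last run in the record,
 * used below as the goal for contiguous allocation.
 */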
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

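/*
 * Write the cached extent record back into the extents tree, inserting
 * a new record if it has never been on disk.  Caller must hold
 * hip->extents_lock and pass a find_data set up for the extents tree.
 */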
static int __hfsplus_ext_write_extent(struct inode *inode,
		struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);

	return 0;
}

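/*
 * Flush the cached extent record if it is dirty.  Caller must hold
 * hip->extents_lock; hfsplus_ext_write_extent() below is the locking
 * wrapper around this helper.
 */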
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}

int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}

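/*
 * Look up the extent record covering @block and read it into @extent.
 * fd->key->ext.cnid is cleared first so that a failed lookup cannot
 * accidentally compare equal to the search key below.
 */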
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		sizeof(hfsplus_extent_rec));
	return 0;
}

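/*
 * Make hip->cached_extents cover @block: write back the old record if
 * it is dirty, then read the record containing @block and remember the
 * range it spans.  Caller must hold hip->extents_lock.
 */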
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
		struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}

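/*
 * Ensure the cached extent record covers @block, hitting the btree
 * only on a cache miss.
 */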
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (!create)
			return 0;
		if (iblock > hip->fs_blocks)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (unsigned long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		  sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}

static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, "   ");
	for (i = 0; i < 8; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be32_to_cpu(extent[i].start_block),
			     be32_to_cpu(extent[i].block_count));
	hfs_dbg_cont(EXTENT, "\n");
}

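/*
 * Record a freshly allocated run at @offset blocks into the record:
 * either merge it into the extent it directly follows or start the
 * next free slot; returns -ENOSPC once all eight slots are in use.
 */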
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

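/*
 * Free the last @block_nr blocks of the range ending @offset blocks
 * into the record, walking the extents backwards.  Bitmap errors are
 * reported but the walk continues; only the last error is returned.
 */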
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	/* Mapping the allocation file may lock the extent tree */
	WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

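/*
 * Free every allocation block owned by a fork: first the extents held
 * inline in @fork, then, record by record from the end, anything in
 * the extents overflow tree.  The tree lock is dropped around the
 * bitmap updates because mapping the allocation file may itself take
 * the extents tree lock.
 */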
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfs_brec_remove(&fd);

		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, ext_entry, total_blocks - start,
				     total_blocks);
		total_blocks = start;
		mutex_lock(&fd.tree->tree_lock);
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

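/*
 * Grow the fork by up to one clump: allocate blocks (preferring to
 * extend the fork's last extent), optionally zero them out, and record
 * the new run inline, in the cached record, or in a brand new overflow
 * record.
 */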
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		pr_err_ratelimited("extend alloc file! (%llu,%u,%u)\n",
				   sbi->alloc_file->i_size * 8,
				   sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (zeroout) {
		res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
		if (res)
			goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	if (!res) {
		hip->alloc_blocks += len;
		mutex_unlock(&hip->extents_lock);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
		return 0;
	}
	mutex_unlock(&hip->extents_lock);
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}

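/*
 * Shrink the fork to inode->i_size, returning freed blocks to the
 * bitmap and trimming extent records from the end; a grow is handled
 * by a zero-length write at the new size so the tail page gets zeroed.
 */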
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (unsigned long long)hip->phys_size,
		(unsigned long long)inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata = NULL;
		loff_t size = inode->i_size;

		res = hfsplus_write_begin(NULL, mapping, size, 0,
					  &page, &fsdata);
		if (res)
			return;
		res = generic_write_end(NULL, mapping, size, 0, 0,
					page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;

	mutex_lock(&hip->extents_lock);

	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out_unlock;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			mutex_unlock(&fd.tree->tree_lock);
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			mutex_lock(&fd.tree->tree_lock);
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;

		start = hip->cached_start;
		if (blk_cnt <= start)
			hfs_brec_remove(&fd);
		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		mutex_lock(&fd.tree->tree_lock);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	hfs_find_exit(&fd);

	hip->alloc_blocks = blk_cnt;
out_unlock:
	mutex_unlock(&hip->extents_lock);
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
		sb->s_blocksize_bits;
	inode_set_bytes(inode, (loff_t)hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}