// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/bfs/file.c
 *	BFS file operations.
 *	Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 *	Make the file block allocation algorithm understand the size
 *	of the underlying block device.
 *	Copyright (C) 2007 Dmitri Vorobiev <dmitri.vorobiev@gmail.com>
 *
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include "bfs.h"

#undef DEBUG

#ifdef DEBUG
#define dprintf(x...)	printf(x)
#else
#define dprintf(x...)
#endif

const struct file_operations bfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= filemap_splice_read,
};

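/*
 * BFS keeps every file in a single contiguous run of disk blocks
 * (bi->i_sblock..bi->i_eblock).  Growing a file that is not the last
 * allocated one therefore means copying its data to a fresh run past the
 * current end of allocated space; the helpers below do that copy one
 * block at a time through the buffer cache.
 */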
static int bfs_move_block(unsigned long from, unsigned long to,
					struct super_block *sb)
{
	struct buffer_head *bh, *new;

	bh = sb_bread(sb, from);
	if (!bh)
		return -EIO;
	new = sb_getblk(sb, to);
	if (!new) {
		/* No buffer for the destination block; report I/O failure. */
		brelse(bh);
		return -EIO;
	}
	memcpy(new->b_data, bh->b_data, bh->b_size);
	mark_buffer_dirty(new);
	bforget(bh);
	brelse(new);
	return 0;
}

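/*
 * Copy each block i in start..end to block where + i, giving up as soon
 * as one block cannot be moved.
 */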
static int bfs_move_blocks(struct super_block *sb, unsigned long start,
				unsigned long end, unsigned long where)
{
	unsigned long i;

	dprintf("%08lx-%08lx->%08lx\n", start, end, where);
	for (i = start; i <= end; i++)
		if (bfs_move_block(i, where + i, sb)) {
			dprintf("failed to move block %08lx -> %08lx\n", i,
								where + i);
			return -EIO;
		}
	return 0;
}

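/*
 * Map logical block 'block' of 'inode' to a block on disk.  Because the
 * file is contiguous, the mapping is simply i_sblock + block.  With
 * 'create' set, the file may be extended in place when it is the last
 * allocated file, or relocated past the last allocated block otherwise.
 */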
static int bfs_get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh_result, int create)
{
	unsigned long phys;
	int err;
	struct super_block *sb = inode->i_sb;
	struct bfs_sb_info *info = BFS_SB(sb);
	struct bfs_inode_info *bi = BFS_I(inode);

	phys = bi->i_sblock + block;
	if (!create) {
		if (phys <= bi->i_eblock) {
			dprintf("c=%d, b=%08lx, phys=%08lx (granted)\n",
				create, (unsigned long)block, phys);
			map_bh(bh_result, sb, phys);
		}
		return 0;
	}

	/*
	 * If the file is not empty and the requested block is within the
	 * range of blocks allocated for this file, we can grant it.
	 */
	if (bi->i_sblock && (phys <= bi->i_eblock)) {
		dprintf("c=%d, b=%08lx, phys=%08lx (interim block granted)\n",
				create, (unsigned long)block, phys);
		map_bh(bh_result, sb, phys);
		return 0;
	}

	/* The file will be extended, so let's see if there is enough space. */
	if (phys >= info->si_blocks)
		return -ENOSPC;

	/*
	 * The allocation below must be serialised against other block
	 * allocations and relocations on this filesystem.
	 */
	mutex_lock(&info->bfs_lock);

	/*
	 * If the last data block for this file is the last allocated
	 * block, we can extend the file trivially, without moving it
	 * anywhere.
	 */
	if (bi->i_eblock == info->si_lf_eblk) {
		dprintf("c=%d, b=%08lx, phys=%08lx (simple extension)\n",
				create, (unsigned long)block, phys);
		map_bh(bh_result, sb, phys);
		info->si_freeb -= phys - bi->i_eblock;
		info->si_lf_eblk = bi->i_eblock = phys;
		mark_inode_dirty(inode);
		err = 0;
		goto out;
	}

	/* Ok, we have to move this entire file to the next free block. */
	phys = info->si_lf_eblk + 1;
	if (phys + block >= info->si_blocks) {
		err = -ENOSPC;
		goto out;
	}

	if (bi->i_sblock) {
		err = bfs_move_blocks(inode->i_sb, bi->i_sblock,
						bi->i_eblock, phys);
		if (err) {
			dprintf("failed to move ino=%08lx -> fs corruption\n",
								inode->i_ino);
			goto out;
		}
	} else
		err = 0;

	dprintf("c=%d, b=%08lx, phys=%08lx (moved)\n",
			create, (unsigned long)block, phys);
	bi->i_sblock = phys;
	phys += block;
	info->si_lf_eblk = bi->i_eblock = phys;

	/*
	 * This assumes nothing can write the inode back while we are here
	 * and thus update inode->i_blocks! (XXX)
	 */
	info->si_freeb -= bi->i_eblock - bi->i_sblock + 1 - inode->i_blocks;
	mark_inode_dirty(inode);
	map_bh(bh_result, sb, phys);
out:
	mutex_unlock(&info->bfs_lock);
	return err;
}

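/*
 * The routines below are thin wrappers that feed bfs_get_block into the
 * generic buffer-head based page cache helpers.
 */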
static int bfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, bfs_get_block);
}

static int bfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, bfs_get_block);
}

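/*
 * A failed or short write may have instantiated page cache pages beyond
 * the current end of file; drop them again.
 */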
static void bfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size)
		truncate_pagecache(inode, inode->i_size);
}

static int bfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block);
	if (unlikely(ret))
		bfs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, bfs_get_block);
}

const struct address_space_operations bfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= bfs_read_folio,
	.writepages	= bfs_writepages,
	.write_begin	= bfs_write_begin,
	.write_end	= generic_write_end,
	.migrate_folio	= buffer_migrate_folio,
	.bmap		= bfs_bmap,
};

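/* BFS regular files need no filesystem-specific inode operations, hence the empty table. */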
const struct inode_operations bfs_file_inops;