1// SPDX-License-Identifier: GPL-2.0
2/*
3 *  linux/fs/ufs/util.c
4 *
5 * Copyright (C) 1998
6 * Daniel Pirkl <daniel.pirkl@email.cz>
7 * Charles University, Faculty of Mathematics and Physics
8 */
9
10#include <linux/string.h>
11#include <linux/slab.h>
12#include <linux/buffer_head.h>
13
14#include "ufs_fs.h"
15#include "ufs.h"
16#include "swab.h"
17#include "util.h"
18
19struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
20	struct super_block *sb, u64 fragment, u64 size)
21{
22	struct ufs_buffer_head * ubh;
23	unsigned i, j ;
24	u64  count = 0;
25	if (size & ~uspi->s_fmask)
26		return NULL;
27	count = size >> uspi->s_fshift;
28	if (count > UFS_MAXFRAG)
29		return NULL;
30	ubh = kmalloc (sizeof (struct ufs_buffer_head), GFP_NOFS);
31	if (!ubh)
32		return NULL;
33	ubh->fragment = fragment;
34	ubh->count = count;
35	for (i = 0; i < count; i++)
36		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
37			goto failed;
38	for (; i < UFS_MAXFRAG; i++)
39		ubh->bh[i] = NULL;
40	return ubh;
41failed:
42	for (j = 0; j < i; j++)
43		brelse (ubh->bh[j]);
44	kfree(ubh);
45	return NULL;
46}
47
48struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
49	struct super_block *sb, u64 fragment, u64 size)
50{
51	unsigned i, j;
52	u64 count = 0;
53	if (size & ~uspi->s_fmask)
54		return NULL;
55	count = size >> uspi->s_fshift;
56	if (count <= 0 || count > UFS_MAXFRAG)
57		return NULL;
58	USPI_UBH(uspi)->fragment = fragment;
59	USPI_UBH(uspi)->count = count;
60	for (i = 0; i < count; i++)
61		if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
62			goto failed;
63	for (; i < UFS_MAXFRAG; i++)
64		USPI_UBH(uspi)->bh[i] = NULL;
65	return USPI_UBH(uspi);
66failed:
67	for (j = 0; j < i; j++)
68		brelse (USPI_UBH(uspi)->bh[j]);
69	return NULL;
70}
71
72void ubh_brelse (struct ufs_buffer_head * ubh)
73{
74	unsigned i;
75	if (!ubh)
76		return;
77	for (i = 0; i < ubh->count; i++)
78		brelse (ubh->bh[i]);
79	kfree (ubh);
80}
81
82void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
83{
84	unsigned i;
85	if (!USPI_UBH(uspi))
86		return;
87	for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
88		brelse (USPI_UBH(uspi)->bh[i]);
89		USPI_UBH(uspi)->bh[i] = NULL;
90	}
91}
92
93void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
94{
95	unsigned i;
96	if (!ubh)
97		return;
98	for ( i = 0; i < ubh->count; i++ )
99		mark_buffer_dirty (ubh->bh[i]);
100}
101
102void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
103{
104	unsigned i;
105	if (!ubh)
106		return;
107	if (flag) {
108		for ( i = 0; i < ubh->count; i++ )
109			set_buffer_uptodate (ubh->bh[i]);
110	} else {
111		for ( i = 0; i < ubh->count; i++ )
112			clear_buffer_uptodate (ubh->bh[i]);
113	}
114}
115
116void ubh_sync_block(struct ufs_buffer_head *ubh)
117{
118	if (ubh) {
119		unsigned i;
120
121		for (i = 0; i < ubh->count; i++)
122			write_dirty_buffer(ubh->bh[i], 0);
123
124		for (i = 0; i < ubh->count; i++)
125			wait_on_buffer(ubh->bh[i]);
126	}
127}
128
129void ubh_bforget (struct ufs_buffer_head * ubh)
130{
131	unsigned i;
132	if (!ubh)
133		return;
134	for ( i = 0; i < ubh->count; i++ ) if ( ubh->bh[i] )
135		bforget (ubh->bh[i]);
136}
137
138int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
139{
140	unsigned i;
141	unsigned result = 0;
142	if (!ubh)
143		return 0;
144	for ( i = 0; i < ubh->count; i++ )
145		result |= buffer_dirty(ubh->bh[i]);
146	return result;
147}
148
149void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi,
150	unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
151{
152	unsigned len, bhno;
153	if (size > (ubh->count << uspi->s_fshift))
154		size = ubh->count << uspi->s_fshift;
155	bhno = 0;
156	while (size) {
157		len = min_t(unsigned int, size, uspi->s_fsize);
158		memcpy (mem, ubh->bh[bhno]->b_data, len);
159		mem += uspi->s_fsize;
160		size -= len;
161		bhno++;
162	}
163}
164
165void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
166	struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
167{
168	unsigned len, bhno;
169	if (size > (ubh->count << uspi->s_fshift))
170		size = ubh->count << uspi->s_fshift;
171	bhno = 0;
172	while (size) {
173		len = min_t(unsigned int, size, uspi->s_fsize);
174		memcpy (ubh->bh[bhno]->b_data, mem, len);
175		mem += uspi->s_fsize;
176		size -= len;
177		bhno++;
178	}
179}
180
/*
 * Decode the device number stored in a special (device) inode.
 *
 * SUNx86 filesystems keep the number in i_data[1], everything else in
 * i_data[0].  On Sun variants the value may be either an old 16-bit
 * encoding (high half all-zero or all-one) or a SysV major/minor
 * encoding; other variants always use the old encoding.
 */
dev_t
ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
{
	__u32 fs32;
	dev_t dev;

	/* SUNx86 stores the device number in the second data word */
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]);
	else
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]);
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNx86:
	case UFS_ST_SUN:
		/* high 16 bits all-zero/all-one marks the old 16-bit format */
		if ((fs32 & 0xffff0000) == 0 ||
		    (fs32 & 0xffff0000) == 0xffff0000)
			dev = old_decode_dev(fs32 & 0x7fff);
		else
			dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
		break;

	default:
		dev = old_decode_dev(fs32);
		break;
	}
	return dev;
}
207
/*
 * Encode @dev into the on-disk representation for a special inode;
 * inverse of ufs_get_inode_dev().
 *
 * Sun variants prefer the SysV encoding but fall back to the old
 * 16-bit encoding when the number fits (high bits of the SysV form
 * clear); other variants always use the old encoding.  SUNx86 writes
 * the result to i_data[1], everything else to i_data[0].
 */
void
ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
{
	__u32 fs32;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNx86:
	case UFS_ST_SUN:
		fs32 = sysv_encode_dev(dev);
		/* small enough for the old 16-bit format? use it instead */
		if ((fs32 & 0xffff8000) == 0) {
			fs32 = old_encode_dev(dev);
		}
		break;

	default:
		fs32 = old_encode_dev(dev);
		break;
	}
	/* SUNx86 stores the device number in the second data word */
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		ufsi->i_u1.i_data[1] = cpu_to_fs32(sb, fs32);
	else
		ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32);
}
231
/**
 * ufs_get_locked_folio() - locate, pin and lock a pagecache folio; if it
 * does not exist, read it from disk.
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Locates the desired pagecache folio; if it does not already exist,
 * reads it from disk, locks it, increments its reference count and
 * returns its address.
 */
struct folio *ufs_get_locked_folio(struct address_space *mapping,
				 pgoff_t index)
{
	struct inode *inode = mapping->host;
	/* fast path: folio already cached, returned locked and referenced */
	struct folio *folio = filemap_lock_folio(mapping, index);
	if (IS_ERR(folio)) {
		/* not cached: read it in (returned unlocked on success) */
		folio = read_mapping_folio(mapping, index, NULL);

		if (IS_ERR(folio)) {
			/* propagate the ERR_PTR; callers must check IS_ERR */
			printk(KERN_ERR "ufs_change_blocknr: read_mapping_folio error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			return folio;
		}

		folio_lock(folio);

		/* re-check after locking: mapping going NULL means the folio
		 * was removed from the page cache while we slept */
		if (unlikely(folio->mapping == NULL)) {
			/* Truncate got there first */
			folio_unlock(folio);
			folio_put(folio);
			return NULL;
		}
	}
	/* callers expect buffer heads to be attached to the folio */
	if (!folio_buffers(folio))
		create_empty_buffers(folio, 1 << inode->i_blkbits, 0);
	return folio;
}
270