/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/string.h>
#include <linux/random.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_fs_sb.h>

/* find where the objectid map starts: it follows the (v1 or v2) super block */
#define objectid_map(s,rs) (old_format_only (s) ? \
			 (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) : \
			 (__le32 *)((rs) + 1))
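
/*
 * Layout sketch (illustrative, not part of the original source): the map
 * lives in the same disk block as the super block, immediately after it.
 * An old-format filesystem has the smaller v1 super block, so its map
 * starts earlier in the block; a new-format filesystem's map starts right
 * after the full v2 super block.  Either way the map shares the super
 * block's buffer, SB_BUFFER_WITH_SB(s), which is why the functions below
 * journal that one buffer.
 */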

#ifdef CONFIG_REISERFS_CHECK

static void check_objectid_map(struct super_block *s, __le32 * map)
{
	if (le32_to_cpu(map[0]) != 1)
		reiserfs_panic(s,
			       "vs-15010: check_objectid_map: map corrupted: %lx",
			       (long unsigned int)le32_to_cpu(map[0]));
}

#else
static void check_objectid_map(struct super_block *s, __le32 * map)
{
}
#endif

/* When we allocate objectids we allocate the first unused objectid.
   Each sequence of objectids in use (the odd sequences) is followed
   by a sequence of objectids not in use (the even sequences).  We
   only need to record the last objectid in each of these sequences
   (both the odd and even sequences) in order to fully define the
   boundaries of the sequences.  A consequence of allocating the first
   objectid not in use is that under most conditions this scheme is
   extremely compact.  The exception is immediately after a sequence
   of operations which deletes a large number of objects of
   non-sequential objectids, and even then it will become compact
   again as soon as more objects are created.  Note that many
   interesting optimizations of layout could result from complicating
   objectid assignment, but we have deferred making them for now. */

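/*
 * Worked example (illustrative; the numbers are invented): a map of
 * [1, 36, 40, 42] with cursize == 4 encodes "objectids 1..35 in use,
 * 36..39 free, 40..41 in use, free from 42 on".  Each even slot begins
 * a used run and the following odd slot ends it, so map[0] is always 1
 * (see check_objectid_map above) and map[1] is always the first unused
 * objectid, which is what the allocator below hands out.
 */
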
/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] is the first unused objectid */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "%s: no more object ids", __func__);
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/* This incrementation allocates the first unused objectid. That
	   is to say, map[1] is the first unused objectid, and by
	   incrementing it we use it.  See below where we check to see if
	   we eliminated a sequence of unused objectids.... */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/* Now we check to see if we eliminated the last remaining member of
	   the first even sequence (and can eliminate the sequence by
	   eliminating its last objectid from oids), and can collapse the
	   first two odd sequences into one sequence.  If so, then the net
	   result is to eliminate a pair of objectids from oids.  We do this
	   by shifting the entire map to the left. */
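	/*
	 * Illustrative example (numbers invented): if the map was
	 * [1, 35, 36, 42], allocating objectid 35 above turns it into
	 * [1, 36, 36, 42]; the free run between the first two used runs
	 * is now empty, so the map collapses to [1, 42] and cursize
	 * drops by 2.
	 */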
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}

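/*
 * Usage sketch (illustrative; the call site is simplified): allocation
 * happens inside a transaction, and a return value of 0 signals map
 * exhaustion, since objectid 0 is never handed out (used runs start
 * at 1):
 *
 *	__u32 oid = reiserfs_get_unused_objectid(th);
 *	if (oid == 0)
 *		return -ENOSPC;
 */
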
/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/* start at the beginning of the objectid map (i = 0) and go to
	   the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	   what we use, though it is possible that binary search would be
	   more efficient after performing lots of deletions (which is
	   when oids is large.)  We only check even i's. */
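	/*
	 * Illustrative walk-through (numbers invented): with a map of
	 * [1, 36, 40, 42], releasing objectid 20 splits the used run
	 * 1..35, growing the map to [1, 20, 21, 36, 40, 42]; releasing
	 * 35 instead only decrements map[1] to 35; releasing 40 bumps
	 * map[2] to 41, and if 41 is then released as well, the now
	 * empty run is dropped and the map shrinks back to [1, 36].
	 */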
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			map[i] = cpu_to_le32(le32_to_cpu(map[i]) + 1);

			/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				map[i + 1] =
				    cpu_to_le32(le32_to_cpu(map[i + 1]) - 1);
				return;
			}

			/* JDM comparing two little-endian values for equality -- safe */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/* objectid map must be expanded, but there is no space */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_warning(s,
			 "vs-15011: reiserfs_release_objectid: tried to free an already free object id (%lu)",
			 (long unsigned)objectid_to_release);
}

int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
	/* round down so the new map holds an even number of 32-bit entries */
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map, *new_objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	objectid_map = (__le32 *) (disk_sb_v1 + 1);
	new_objectid_map = (__le32 *) (disk_sb + 1);

	if (cur_size > new_size) {
		/* mark as used everything that was listed as free at the
		   end of the objectid map */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
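
	/*
	 * Note (explanatory, not in the original): truncating the map
	 * above forgets any free runs recorded past new_size; copying the
	 * old final boundary into the new last slot preserves the "free
	 * from here on" invariant at the cost of leaking the objectids in
	 * the dropped runs.
	 */
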
	/* move the smaller objectid map past the end of the new super */
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* Zero out label and generate random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}