/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/string.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include "reiserfs.h"

/* find where objectid map starts */
#define objectid_map(s,rs) (old_format_only (s) ? \
			(__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
			(__le32 *)((rs) + 1))

#ifdef CONFIG_REISERFS_CHECK

static void check_objectid_map(struct super_block *s, __le32 * map)
{
	if (le32_to_cpu(map[0]) != 1)
		reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
			       (long unsigned int)le32_to_cpu(map[0]));

	/* FIXME: add something else here */
}

#else
static void check_objectid_map(struct super_block *s, __le32 * map)
{;
}
#endif

/*
 * When we allocate objectids we allocate the first unused objectid.
 * Each sequence of objectids in use (the odd sequences) is followed
 * by a sequence of objectids not in use (the even sequences). We
 * only need to record the last objectid in each of these sequences
 * (both the odd and even sequences) in order to fully define the
 * boundaries of the sequences. A consequence of allocating the first
 * objectid not in use is that under most conditions this scheme is
 * extremely compact. The exception is immediately after a sequence
 * of operations which deletes a large number of objects of
 * non-sequential objectids, and even then it will become compact
 * again as soon as more objects are created. Note that many
 * interesting optimizations of layout could result from complicating
 * objectid assignment, but we have deferred making them for now.
 */
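
/*
 * Illustrative layout (an assumed example, not from the original text,
 * but consistent with the code below): with objectids 1..5 and 10..12
 * in use, the on-disk map would hold
 *
 *	map[] = { 1, 6, 10, 13 };	sb_oid_cursize() == 4
 *
 * Even-indexed entries mark where a used run starts, odd-indexed
 * entries mark where the following free run starts, and map[1] is
 * always the first unused objectid, which is what
 * reiserfs_get_unused_objectid() hands out.
 */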

/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] holds the first objectid not yet in use; hand that one out */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/*
	 * This incrementation allocates the first unused objectid. That
	 * is to say, the first entry on the objectid map is the first
	 * unused objectid, and by incrementing it we use it. See below
	 * where we check to see if we eliminated a sequence of unused
	 * objectids....
	 */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/*
	 * Now we check to see if we eliminated the last remaining member of
	 * the first even sequence (and can eliminate the sequence by
	 * eliminating its last objectid from oids), and can collapse the
	 * first two odd sequences into one sequence. If so, then the net
	 * result is to eliminate a pair of objectids from oids. We do this
	 * by shifting the entire map to the left.
	 */
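	/*
	 * Illustrative example (an assumed map, consistent with the code
	 * below): with map {1, 6, 7, 13}, allocating objectid 6 bumps map[1]
	 * to 7, which now equals map[2], so that pair is dropped and the map
	 * collapses to {1, 13}.
	 */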
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}

/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	/*return; */
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));

	/*
	 * start at the beginning of the objectid map (i = 0) and go to
	 * the end of it (i = disk_sb->s_oid_cursize). Linear search is
	 * what we use, though it is possible that binary search would be
	 * more efficient after performing lots of deletions (which is
	 * when oids is large.) We only check even i's.
	 */
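	/*
	 * Summary (restating the code below, not part of the original
	 * comments): if the objectid starts a used run, bump map[i] and
	 * collapse the run if it became empty; if it ends a run, decrement
	 * map[i + 1]; if it sits in the middle, split the run by inserting a
	 * new pair, or leak the id when the map is already full.
	 */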
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			le32_add_cpu(&map[i], 1);

			/*
			 * Did we unallocate the last member of an
			 * odd sequence, and can shrink oids?
			 */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/*
			 * JDM comparing two little-endian values for
			 * equality -- safe
			 */
			/*
			 * objectid map must be expanded, but
			 * there is no space
			 */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}

int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	objectid_map = (__le32 *) (disk_sb_v1 + 1);

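	/*
	 * Note (an inference from the new_size computation above): the v2
	 * super block is larger than the v1 one, so fewer map slots fit
	 * behind it, and new_size is rounded down to an even count because
	 * map entries always come in pairs.
	 */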
	if (cur_size > new_size) {
		/*
		 * mark everyone used that was listed as free at
		 * the end of the objectid map
		 */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
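	/*
	 * Sketch of why the shift below works (an assumption, not from the
	 * original comments): old_max was sized against the smaller v1 super
	 * block, so (old_max - new_size) __u32 slots is roughly how much the
	 * super block grew, and shifting by that amount leaves the map
	 * starting right after the new super block.
	 */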
	/* move the smaller objectid map past the end of the new super */
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* Zero out label and generate random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}