// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * localalloc.c
 *
 * Node local data allocation
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

#define OCFS2_LOCAL_ALLOC(dinode)	(&((dinode)->id2.i_lab))

static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);

static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
					     struct ocfs2_dinode *alloc,
					     u32 *numbits,
					     struct ocfs2_alloc_reservation *resv);

static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);

static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh);

static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh);

static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac);

static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode);

/*
 * ocfs2_la_default_mb() - determine a default size, in megabytes of
 * the local alloc.
 *
 * Generally, we'd like to pick as large a local alloc as
 * possible. Performance on large workloads tends to scale
 * proportionally to la size. In addition to that, the reservations
 * code functions more efficiently as it can reserve more windows for
 * write.
 *
 * Some things work against us when trying to choose a large local alloc:
 *
 * - We need to ensure our sizing is picked to leave enough space in
 *   group descriptors for other allocations (such as block groups,
 *   etc). Picking default sizes which are a multiple of 4 could help
 *   - block groups are allocated in 2mb and 4mb chunks.
 *
 * - Likewise, we don't want to starve other nodes of bits on small
 *   file systems. This can easily be taken care of by limiting our
 *   default to a reasonable size (256M) on larger cluster sizes.
 *
 * - Some file systems can't support very large sizes - 4k and 8k in
 *   particular are limited to less than 128 and 256 megabytes respectively.
 *
 * The following reference table shows group descriptor and local
 * alloc maximums at various cluster sizes (4k blocksize)
 *
 * csize: 4K	group: 126M	la: 121M
 * csize: 8K	group: 252M	la: 243M
 * csize: 16K	group: 504M	la: 486M
 * csize: 32K	group: 1008M	la: 972M
 * csize: 64K	group: 2016M	la: 1944M
 * csize: 128K	group: 4032M	la: 3888M
 * csize: 256K	group: 8064M	la: 7776M
 * csize: 512K	group: 16128M	la: 15552M
 * csize: 1024K	group: 32256M	la: 31104M
 */
#define OCFS2_LA_MAX_DEFAULT_MB	256
#define OCFS2_LA_OLD_DEFAULT	8
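/*
 * Worked example (4K blocksize, 16K clustersize, large volume): the
 * reference table above gives a 504M group; after the block group
 * headroom adjustment below, gd_mb works out to roughly 488M, which is
 * over OCFS2_LA_MAX_DEFAULT_MB but less than two full windows, so it
 * halves once to a ~244M default. The per-slot and la_max_mb checks at
 * the bottom of the function can still shrink that further.
 */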
unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
{
	unsigned int la_mb;
	unsigned int gd_mb;
	unsigned int la_max_mb;
	unsigned int megs_per_slot;
	struct super_block *sb = osb->sb;

	gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
		8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));

	/*
	 * This takes care of file systems with very small group
	 * descriptors - 512 byte blocksize at cluster sizes lower
	 * than 16K and also 1k blocksize with 4k cluster size.
	 */
	if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
	    || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
		return OCFS2_LA_OLD_DEFAULT;

	/*
	 * Leave enough room for some block groups and make the final
	 * value we work from a multiple of 4.
	 */
	gd_mb -= 16;
	gd_mb &= 0xFFFFFFFB;

	la_mb = gd_mb;

	/*
	 * Keep window sizes down to a reasonable default
	 */
	if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
		/*
		 * Some clustersize / blocksize combinations will have
		 * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
		 * default size, but get poor distribution when
		 * limited to exactly 256 megabytes.
		 *
		 * As an example, 16K clustersize at 4K blocksize
		 * gives us a cluster group size of 504M. Paring the
		 * local alloc size down to 256 however, would give us
		 * only one window and around 200MB left in the
		 * cluster group. Instead, find the first size below
		 * 256 which would give us an even distribution.
		 *
		 * Larger cluster group sizes actually work out pretty
		 * well when pared to 256, so we don't have to do this
		 * for any group that fits more than two
		 * OCFS2_LA_MAX_DEFAULT_MB windows.
		 */
		if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
			la_mb = 256;
		else {
			unsigned int gd_mult = gd_mb;

			while (gd_mult > 256)
				gd_mult = gd_mult >> 1;

			la_mb = gd_mult;
		}
	}

	megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
	megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
	/* Too many nodes, too few disk clusters. */
	if (megs_per_slot < la_mb)
		la_mb = megs_per_slot;

	/* We can't store more bits than we can in a block. */
	la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
						ocfs2_local_alloc_size(sb) * 8);
	if (la_mb > la_max_mb)
		la_mb = la_max_mb;

	return la_mb;
}

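/*
 * Translate a requested window size (in megabytes; -1 means no request,
 * use the computed default) into local_alloc_default_bits, clamped to
 * the largest window that fits in a single local alloc bitmap block.
 */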
void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
{
	struct super_block *sb = osb->sb;
	unsigned int la_default_mb = ocfs2_la_default_mb(osb);
	unsigned int la_max_mb;

	la_max_mb = ocfs2_clusters_to_megabytes(sb,
						ocfs2_local_alloc_size(sb) * 8);

	trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);

	if (requested_mb == -1) {
		/* No user request - use defaults */
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, la_default_mb);
	} else if (requested_mb > la_max_mb) {
		/* Request is too big, we give the maximum available */
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, la_max_mb);
	} else {
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, requested_mb);
	}

	osb->local_alloc_bits = osb->local_alloc_default_bits;
}

static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
	return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
		osb->local_alloc_state == OCFS2_LA_ENABLED);
}

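/*
 * Called when a caller has just seen num_clusters freed back to the
 * main bitmap. If the local alloc is currently disabled or throttled
 * and the free is at least one full default window, re-enable it right
 * away and cancel any pending delayed re-enable.
 */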
void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
				      unsigned int num_clusters)
{
	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
	    osb->local_alloc_state == OCFS2_LA_THROTTLED)
		if (num_clusters >= osb->local_alloc_default_bits) {
			cancel_delayed_work(&osb->la_enable_wq);
			osb->local_alloc_state = OCFS2_LA_ENABLED;
		}
	spin_unlock(&osb->osb_lock);
}

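/*
 * Delayed work queued by ocfs2_recalc_la_window() when the local alloc
 * gets throttled or disabled; after OCFS2_LA_ENABLE_INTERVAL it
 * unconditionally flips the state back to OCFS2_LA_ENABLED.
 */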
void ocfs2_la_enable_worker(struct work_struct *work)
{
	struct ocfs2_super *osb =
		container_of(work, struct ocfs2_super,
			     la_enable_wq.work);
	spin_lock(&osb->osb_lock);
	osb->local_alloc_state = OCFS2_LA_ENABLED;
	spin_unlock(&osb->osb_lock);
}

/*
 * Tell us whether a given allocation should use the local alloc
 * file. Otherwise, it has to go to the main bitmap.
 *
 * This function does semi-dirty reads of local alloc size and state!
 * This is ok however, as the values are re-checked once under mutex.
 */
int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
{
	int ret = 0;
	int la_bits;

	spin_lock(&osb->osb_lock);
	la_bits = osb->local_alloc_bits;

	if (!ocfs2_la_state_enabled(osb))
		goto bail;

	/* la_bits should be at least twice the size (in clusters) of
	 * a new block group. We want to be sure block group
	 * allocations go through the local alloc, so allow an
	 * allocation to take up to half the bitmap. */
	if (bits > (la_bits / 2))
		goto bail;

	ret = 1;
bail:
	trace_ocfs2_alloc_should_use_local(
		(unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
	spin_unlock(&osb->osb_lock);
	return ret;
}

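/*
 * Read this slot's local alloc inode off disk and sanity check it. A
 * previously recovered local alloc must be completely clean (no used
 * bits, no window offset), otherwise we fail the load and ask the
 * administrator to run fsck.ocfs2.
 */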
int ocfs2_load_local_alloc(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_dinode *alloc = NULL;
	struct buffer_head *alloc_bh = NULL;
	u32 num_used;
	struct inode *inode = NULL;
	struct ocfs2_local_alloc *la;

	if (osb->local_alloc_bits == 0)
		goto bail;

	if (osb->local_alloc_bits >= osb->bitmap_cpg) {
		mlog(ML_NOTICE, "Requested local alloc window %d is larger "
		     "than max possible %u. Using defaults.\n",
		     osb->local_alloc_bits, (osb->bitmap_cpg - 1));
		osb->local_alloc_bits =
			ocfs2_megabytes_to_clusters(osb->sb,
						    ocfs2_la_default_mb(osb));
	}

	/* read the alloc off disk */
	inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	if (!(le32_to_cpu(alloc->i_flags) &
	      (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
		mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		status = -EINVAL;
		goto bail;
	}

	if ((la->la_size == 0) ||
	    (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
		mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
		     le16_to_cpu(la->la_size));
		status = -EINVAL;
		goto bail;
	}

	/* do a little verification. */
	num_used = ocfs2_local_alloc_count_bits(alloc);

	/* hopefully the local alloc has always been recovered before
	 * we load it. */
	if (num_used
	    || alloc->id1.bitmap1.i_used
	    || alloc->id1.bitmap1.i_total
	    || la->la_bm_off) {
		mlog(ML_ERROR, "inconsistency detected, clean journal with"
		     " unrecovered local alloc, please run fsck.ocfs2!\n"
		     "found = %u, set = %u, taken = %u, off = %u\n",
		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
		     le32_to_cpu(alloc->id1.bitmap1.i_total),
		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);

		status = -EINVAL;
		goto bail;
	}

	osb->local_alloc_bh = alloc_bh;
	osb->local_alloc_state = OCFS2_LA_ENABLED;

bail:
	if (status < 0)
		brelse(alloc_bh);
	iput(inode);

	trace_ocfs2_load_local_alloc(osb->local_alloc_bits);

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * return any unused bits to the bitmap and write out a clean
 * local_alloc.
 *
 * The local alloc buffer cached on osb is used here; be warned that it
 * *will* be brelse'd and osb->local_alloc_bh NULL'd out along the way.
 */
void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
{
	int status;
	handle_t *handle;
	struct inode *local_alloc_inode = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_dinode *alloc = NULL;

	cancel_delayed_work(&osb->la_enable_wq);
	if (osb->ocfs2_wq)
		flush_workqueue(osb->ocfs2_wq);

	if (osb->local_alloc_state == OCFS2_LA_UNUSED)
		goto out;

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto out;
	}

	osb->local_alloc_state = OCFS2_LA_DISABLED;

	ocfs2_resmap_uninit(&osb->osb_la_resmap);

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto out;
	}

	inode_lock(main_bm_inode);

	status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	/* WINDOW_MOVE_CREDITS is a bit heavy... */
	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		handle = NULL;
		goto out_unlock;
	}

	bh = osb->local_alloc_bh;
	alloc = (struct ocfs2_dinode *) bh->b_data;

	alloc_copy = kmemdup(alloc, bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		goto out_commit;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
					 bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, bh);

	brelse(bh);
	osb->local_alloc_bh = NULL;
	osb->local_alloc_state = OCFS2_LA_UNUSED;

	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0)
		mlog_errno(status);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	iput(local_alloc_inode);

	kfree(alloc_copy);
}

/*
 * We want to free the bitmap bits outside of any recovery context as
 * we'll need a cluster lock to do so, but we must clear the local
 * alloc before giving up the recovered node's journal. To solve this,
 * we kmalloc a copy of the local alloc before it's changed for the
 * caller to process with ocfs2_complete_local_alloc_recovery.
 */
int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
				     int slot_num,
				     struct ocfs2_dinode **alloc_copy)
{
	int status = 0;
	struct buffer_head *alloc_bh = NULL;
	struct inode *inode = NULL;
	struct ocfs2_dinode *alloc;

	trace_ocfs2_begin_local_alloc_recovery(slot_num);

	*alloc_copy = NULL;

	inode = ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	inode_lock(inode);

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
	if (!(*alloc_copy)) {
		status = -ENOMEM;
		goto bail;
	}
	memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	ocfs2_clear_local_alloc(alloc);

	ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
	status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
	if (status < 0)
		mlog_errno(status);

bail:
	if (status < 0) {
		kfree(*alloc_copy);
		*alloc_copy = NULL;
	}

	brelse(alloc_bh);

	if (inode) {
		inode_unlock(inode);
		iput(inode);
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Step 2: By now, we've completed the journal recovery, we've stamped
 * a clean local alloc on disk and dropped the node out of the
 * recovery map. Dlm locks will no longer stall, so lets clear out the
 * main bitmap.
 */
int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
					struct ocfs2_dinode *alloc)
{
	int status;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto out;
	}

	inode_lock(main_bm_inode);

	status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto out_unlock;
	}

	/* we want the bitmap change to be recorded on disk asap */
	handle->h_sync = 1;

	status = ocfs2_sync_local_to_main(osb, handle, alloc,
					  main_bm_inode, main_bm_bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);

out_unlock:
	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);

	brelse(main_bm_bh);

	iput(main_bm_inode);

out:
	if (!status)
		ocfs2_init_steal_slots(osb);
	if (status)
		mlog_errno(status);
	return status;
}

/*
 * make sure we've got at least bits_wanted contiguous bits in the
 * local alloc. You lose them when you drop i_mutex.
 *
 * This may start (and commit) its own transaction in order to shift
 * windows.
 */
int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
				   u32 bits_wanted,
				   struct ocfs2_alloc_context *ac)
{
	int status;
	struct ocfs2_dinode *alloc;
	struct inode *local_alloc_inode;
	unsigned int free_bits;

	BUG_ON(!ac);

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto bail;
	}

	inode_lock(local_alloc_inode);

	/*
	 * We must double check state and allocator bits because
	 * another process may have changed them while holding i_mutex.
	 */
	spin_lock(&osb->osb_lock);
	if (!ocfs2_la_state_enabled(osb) ||
	    (bits_wanted > osb->local_alloc_bits)) {
		spin_unlock(&osb->osb_lock);
		status = -ENOSPC;
		goto bail;
	}
	spin_unlock(&osb->osb_lock);

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

#ifdef CONFIG_OCFS2_DEBUG_FS
	if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
	    ocfs2_local_alloc_count_bits(alloc)) {
		status = ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
				     (unsigned long long)le64_to_cpu(alloc->i_blkno),
				     le32_to_cpu(alloc->id1.bitmap1.i_used),
				     ocfs2_local_alloc_count_bits(alloc));
		goto bail;
	}
#endif

	free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
		le32_to_cpu(alloc->id1.bitmap1.i_used);
	if (bits_wanted > free_bits) {
		/* uhoh, window change time. */
		status =
			ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}

		/*
		 * Under certain conditions, the window slide code
		 * might have reduced the number of bits available or
		 * disabled the local alloc entirely. Re-check
		 * here and return -ENOSPC if necessary.
		 */
		status = -ENOSPC;
		if (!ocfs2_la_state_enabled(osb))
			goto bail;

		free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
			le32_to_cpu(alloc->id1.bitmap1.i_used);
		if (bits_wanted > free_bits)
			goto bail;
	}

	ac->ac_inode = local_alloc_inode;
	/* We should never use localalloc from another slot */
	ac->ac_alloc_slot = osb->slot_num;
	ac->ac_which = OCFS2_AC_USE_LOCAL;
	get_bh(osb->local_alloc_bh);
	ac->ac_bh = osb->local_alloc_bh;
	status = 0;
bail:
	if (status < 0 && local_alloc_inode) {
		inode_unlock(local_alloc_inode);
		iput(local_alloc_inode);
	}

	trace_ocfs2_reserve_local_alloc_bits(
		(unsigned long long)ac->ac_max_block,
		bits_wanted, osb->slot_num, status);

	if (status)
		mlog_errno(status);
	return status;
}

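/*
 * Hand out bits_wanted bits from the current window. The ac must have
 * been set up by ocfs2_reserve_local_alloc_bits() (ac_which ==
 * OCFS2_AC_USE_LOCAL). The returned *bit_off is a global bitmap
 * cluster offset: la_bm_off plus the start of the run found in the
 * window's bitmap.
 */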
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct ocfs2_alloc_context *ac,
				 u32 bits_wanted,
				 u32 *bit_off,
				 u32 *num_bits)
{
	int status, start;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
						  ac->ac_resv);
	if (start == -1) {
		/* TODO: Shouldn't we just BUG here? */
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	bitmap = la->la_bitmap;
	*bit_off = le32_to_cpu(la->la_bm_off) + start;
	*num_bits = bits_wanted;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
				  bits_wanted);

	while (bits_wanted--)
		ocfs2_set_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	if (status)
		mlog_errno(status);
	return status;
}

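/*
 * Give num_bits bits back to the window. bit_off is the same global
 * bitmap cluster offset that ocfs2_claim_local_alloc_bits() returned;
 * it is converted back to a window-relative offset before clearing.
 */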
int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
				handle_t *handle,
				struct ocfs2_alloc_context *ac,
				u32 bit_off,
				u32 num_bits)
{
	int status, start;
	u32 clear_bits;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	bitmap = la->la_bitmap;
	start = bit_off - le32_to_cpu(la->la_bm_off);
	clear_bits = num_bits;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	while (clear_bits--)
		ocfs2_clear_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	return status;
}

static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
	u32 count;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));

	trace_ocfs2_local_alloc_count_bits(count);
	return count;
}

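/*
 * Find a run of up to *numbits clear bits in the window. Returns the
 * starting offset within the local alloc bitmap, or -1 if nothing
 * suitable is found. When the reservations code satisfies the request
 * it may hand back a shorter run, in which case *numbits is shrunk to
 * match.
 */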
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
					     struct ocfs2_dinode *alloc,
					     u32 *numbits,
					     struct ocfs2_alloc_reservation *resv)
{
	int numfound = 0, bitoff, left, startoff;
	int local_resv = 0;
	struct ocfs2_alloc_reservation r;
	void *bitmap = NULL;
	struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;

	if (!alloc->id1.bitmap1.i_total) {
		bitoff = -1;
		goto bail;
	}

	if (!resv) {
		local_resv = 1;
		ocfs2_resv_init_once(&r);
		ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
		resv = &r;
	}

	numfound = *numbits;
	if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
		if (numfound < *numbits)
			*numbits = numfound;
		goto bail;
	}

	/*
	 * Code error. While reservations are enabled, local
	 * allocation should _always_ go through them.
	 */
	BUG_ON(osb->osb_resv_level != 0);

	/*
	 * Reservations are disabled. Handle this the old way.
	 */

	bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;

	numfound = bitoff = startoff = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
		if (bitoff == left) {
			/* mlog(0, "bitoff (%d) == left", bitoff); */
			break;
		}
		/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
		   "numfound = %d\n", bitoff, startoff, numfound);*/

		/* Ok, we found a zero bit... is it contig. or do we
		 * start over?*/
		if (bitoff == startoff) {
			/* we found a zero */
			numfound++;
			startoff++;
		} else {
			/* got a zero after some ones */
			numfound = 1;
			startoff = bitoff+1;
		}
		/* we got everything we needed */
		if (numfound == *numbits) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);

	if (numfound == *numbits)
		bitoff = startoff - numfound;
	else
		bitoff = -1;

bail:
	if (local_resv)
		ocfs2_resv_discard(resmap, resv);

	trace_ocfs2_local_alloc_find_clear_bits(*numbits,
						le32_to_cpu(alloc->id1.bitmap1.i_total),
						bitoff, numfound);

	return bitoff;
}

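/* Reset the window to "empty": no offset, zeroed total/used counts and
 * an all-clear bitmap. */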
static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
	int i;

	alloc->id1.bitmap1.i_total = 0;
	alloc->id1.bitmap1.i_used = 0;
	la->la_bm_off = 0;
	for (i = 0; i < le16_to_cpu(la->la_size); i++)
		la->la_bitmap[i] = 0;
}

#if 0
/* turn this on and uncomment below to aid debugging window shifts. */
static void ocfs2_verify_zero_bits(unsigned long *bitmap,
				   unsigned int start,
				   unsigned int count)
{
	unsigned int tmp = count;
	while (tmp--) {
		if (ocfs2_test_bit(start + tmp, bitmap)) {
			printk("ocfs2_verify_zero_bits: start = %u, count = "
			       "%u\n", start, count);
			printk("ocfs2_verify_zero_bits: bit %u is set!",
			       start + tmp);
			BUG();
		}
	}
}
#endif

/*
 * sync the local alloc to main bitmap.
 *
 * assumes you've already locked the main bitmap -- the bitmap inode
 * passed is used for caching.
 */
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh)
{
	int status = 0;
	int bit_off, left, count, start;
	u64 la_start_blk;
	u64 blkno;
	void *bitmap;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_sync_local_to_main(
	     le32_to_cpu(alloc->id1.bitmap1.i_total),
	     le32_to_cpu(alloc->id1.bitmap1.i_used));

	if (!alloc->id1.bitmap1.i_total) {
		goto bail;
	}

	if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
	    le32_to_cpu(alloc->id1.bitmap1.i_total)) {
		goto bail;
	}

	la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
						le32_to_cpu(la->la_bm_off));
	bitmap = la->la_bitmap;
	start = count = bit_off = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);

	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
	       != -1) {
		if ((bit_off < left) && (bit_off == start)) {
			count++;
			start++;
			continue;
		}
		if (count) {
			blkno = la_start_blk +
				ocfs2_clusters_to_blocks(osb->sb,
							 start - count);

			trace_ocfs2_sync_local_to_main_free(
			     count, start - count,
			     (unsigned long long)la_start_blk,
			     (unsigned long long)blkno);

			status = ocfs2_release_clusters(handle,
							main_bm_inode,
							main_bm_bh, blkno,
							count);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		if (bit_off >= left)
			break;
		count = 1;
		start = bit_off + 1;
	}

bail:
	if (status)
		mlog_errno(status);
	return status;
}

enum ocfs2_la_event {
	OCFS2_LA_EVENT_SLIDE,		/* Normal window slide. */
	OCFS2_LA_EVENT_FRAGMENTED,	/* The global bitmap has
					 * enough bits theoretically
					 * free, but a contiguous
					 * allocation could not be
					 * found. */
	OCFS2_LA_EVENT_ENOSPC,		/* Global bitmap doesn't have
					 * enough bits free to satisfy
					 * our request. */
};
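/*
 * How long ocfs2_recalc_la_window() waits before the delayed worker
 * tries to re-enable a throttled or disabled local alloc.
 */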
#define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)
/*
 * Given an event, calculate the size of our next local alloc window.
 *
 * This should always be called under i_mutex of the local alloc inode
 * so that local alloc disabling doesn't race with processes trying to
 * use the allocator.
 *
 * Returns the state which the local alloc was left in. This value can
 * be ignored by some paths.
 */
static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
				  enum ocfs2_la_event event)
{
	unsigned int bits;
	int state;

	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED) {
		WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED);
		goto out_unlock;
	}

	/*
	 * ENOSPC and fragmentation are treated similarly for now.
	 */
	if (event == OCFS2_LA_EVENT_ENOSPC ||
	    event == OCFS2_LA_EVENT_FRAGMENTED) {
		/*
		 * We ran out of contiguous space in the primary
		 * bitmap. Drastically reduce the number of bits used
		 * by local alloc until we have to disable it.
		 */
		bits = osb->local_alloc_bits >> 1;
		if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
			/*
			 * By setting state to THROTTLED, we'll keep
			 * the number of local alloc bits used down
			 * until an event occurs which would give us
			 * reason to assume the bitmap situation might
			 * have changed.
			 */
			osb->local_alloc_state = OCFS2_LA_THROTTLED;
			osb->local_alloc_bits = bits;
		} else {
			osb->local_alloc_state = OCFS2_LA_DISABLED;
		}
		queue_delayed_work(osb->ocfs2_wq, &osb->la_enable_wq,
				   OCFS2_LA_ENABLE_INTERVAL);
		goto out_unlock;
	}

	/*
	 * Don't increase the size of the local alloc window until we
	 * know we might be able to fulfill the request. Otherwise, we
	 * risk bouncing around the global bitmap during periods of
	 * low space.
	 */
	if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
		osb->local_alloc_bits = osb->local_alloc_default_bits;

out_unlock:
	state = osb->local_alloc_state;
	spin_unlock(&osb->osb_lock);

	return state;
}

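/*
 * Reserve local_alloc_bits clusters in the global bitmap for the next
 * window. On -ENOSPC the window is shrunk via ocfs2_recalc_la_window()
 * and the reservation retried until it succeeds or the local alloc
 * gets disabled. On success the bitmap inode and buffer are returned
 * with extra references for the caller to drop.
 */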
static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh)
{
	int status;

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

retry_enospc:
	(*ac)->ac_bits_wanted = osb->local_alloc_bits;
	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
	if (status == -ENOSPC) {
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ocfs2_free_ac_resource(*ac);
		memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
		goto retry_enospc;
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*bitmap_inode = (*ac)->ac_inode;
	igrab(*bitmap_inode);
	*bitmap_bh = (*ac)->ac_bh;
	get_bh(*bitmap_bh);
	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Claim and set up a new window. Assumes the global bitmap is already
 * locked -- the reservation in 'ac' was obtained via
 * ocfs2_local_alloc_reserve_for_window().
 */
static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac)
{
	int status = 0;
	u32 cluster_off, cluster_count;
	struct ocfs2_dinode *alloc = NULL;
	struct ocfs2_local_alloc *la;

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_local_alloc_new_window(
	     le32_to_cpu(alloc->id1.bitmap1.i_total),
	     osb->local_alloc_bits);

	/* Instruct the allocation code to try the most recently used
	 * cluster group. We'll re-record the group used this pass
	 * below. */
	ac->ac_last_group = osb->la_last_gd;

	/* we used the generic suballoc reserve function, but we set
	 * everything up nicely, so there's no reason why we can't use
	 * the more specific cluster api to claim bits. */
	status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
				      &cluster_off, &cluster_count);
	if (status == -ENOSPC) {
retry_enospc:
		/*
		 * Note: We could also try syncing the journal here to
		 * allow use of any free bits which the current
		 * transaction can't give us access to. --Mark
		 */
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ac->ac_bits_wanted = osb->local_alloc_bits;
		status = ocfs2_claim_clusters(handle, ac,
					      osb->local_alloc_bits,
					      &cluster_off,
					      &cluster_count);
		if (status == -ENOSPC)
			goto retry_enospc;
		/*
		 * We only shrunk the *minimum* number of bits in our
		 * request - it's entirely possible that the allocator
		 * might give us more than we asked for.
		 */
		if (status == 0) {
			spin_lock(&osb->osb_lock);
			osb->local_alloc_bits = cluster_count;
			spin_unlock(&osb->osb_lock);
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	osb->la_last_gd = ac->ac_last_group;

	la->la_bm_off = cpu_to_le32(cluster_off);
	alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
	/* just in case... In the future when we find space ourselves,
	 * we don't have to get all contiguous -- but we'll have to
	 * set all previously used bits in bitmap and update
	 * la_bits_set before setting the bits in the main bitmap. */
	alloc->id1.bitmap1.i_used = 0;
	memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
	       le16_to_cpu(la->la_size));

	ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
			     OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);

	trace_ocfs2_local_alloc_new_window_result(
	     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
	     le32_to_cpu(alloc->id1.bitmap1.i_total));

bail:
	if (status)
		mlog_errno(status);
	return status;
}

/* Note that we do *NOT* lock the local alloc inode here as
 * it's been locked already for us. */
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode)
{
	int status = 0;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	handle_t *handle = NULL;
	struct ocfs2_dinode *alloc;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_alloc_context *ac = NULL;

	ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);

	/* This will lock the main bitmap for us. */
	status = ocfs2_local_alloc_reserve_for_window(osb,
						      &ac,
						      &main_bm_inode,
						      &main_bm_bh);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

	/* We want to clear the local alloc before doing anything
	 * else, so that if we error later during this operation,
	 * local alloc shutdown won't try to double free main bitmap
	 * bits. Make a copy so the sync function knows which bits to
	 * free. */
	alloc_copy = kmemdup(alloc, osb->local_alloc_bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_local_alloc_new_window(osb, handle, ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	atomic_inc(&osb->alloc_stats.moves);

bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	brelse(main_bm_bh);

	iput(main_bm_inode);
	kfree(alloc_copy);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (status)
		mlog_errno(status);
	return status;
}