// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

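/*
 * Per-profile attributes for each btrfs RAID level, indexed by enum
 * btrfs_raid_types.  Used for chunk allocation constraints, minimum device
 * checks and mapping between block group flags and profile names.
 */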
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0, /* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

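/*
 * Return the human readable name of the RAID profile encoded in @flags, or
 * NULL if the flags do not map to a known profile.
 */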
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc) \
	do { \
		if (flags & (flag)) { \
			ret = snprintf(bp, size_bp, "%s|", (desc)); \
			if (ret < 0 || ret >= size_bp) \
				goto out_overflow; \
			size_bp -= ret; \
			bp += ret; \
			flags &= ~(flag); \
		} \
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed; it's up to the caller to provide a sufficiently
	 * large buffer.
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid: if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

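/*
 * Free a device that is not linked on any list.  Releases the RCU protected
 * name, the allocation state io tree and the preallocated flush bio.
 */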
void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

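/*
 * Free an fs_devices that is not opened, releasing every btrfs_device still
 * linked on its devices list.
 */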
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

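/*
 * Module unload time cleanup: free every fs_devices still registered on the
 * global fs_uuids list.
 */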
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}

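/*
 * Search the global fs_uuids list for an fs_devices whose fsid (and, when
 * @metadata_fsid is given, metadata_uuid) matches.  Returns NULL when no
 * match is found.
 */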
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{

	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

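/*
 * Open the block device at @device_path, optionally flush its page cache,
 * set the btrfs block size and read the super block.  On failure *bdev is
 * reset to NULL and an error is returned.
 */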
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 * Check if the device at @path matches the device represented by the given
 * struct btrfs_device.
 *
 * Returns:
 *   true  If it is the same device.
 *   false If it is not the same device or on error.
 */
static bool device_matched(const struct btrfs_device *device, const char *path)
{
	char *device_name;
	struct block_device *bdev_old;
	struct block_device *bdev_new;

	/*
	 * If we are looking for a device with the matching dev_t, then skip
	 * device without a name (a missing device).
	 */
	if (!device->name)
		return false;

	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!device_name)
		return false;

	rcu_read_lock();
	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
	rcu_read_unlock();

	bdev_old = lookup_bdev(device_name);
	kfree(device_name);
	if (IS_ERR(bdev_old))
		return false;

	bdev_new = lookup_bdev(path);
	if (IS_ERR(bdev_new))
		return false;

	if (bdev_old == bdev_new)
		return true;

	return false;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * path:      Optional. When provided, it will release only the unmounted
 *            devices matching this path.
 * skip_dev:  Optional. Will skip this device when searching for the stale
 *            devices.
 *
 * Return:    0 for success or if @path is NULL.
 *            -EBUSY if @path is a mounted device.
 *            -ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device_matched(device, path))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned device
	 * did not observe it. Meaning our fsid will be different than theirs.
	 * We need to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time the fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
					path, devid, found_transid,
					current->comm,
					task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

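/*
 * Helper for btrfs_free_extra_devids(): drop devices that were not found in
 * the filesystem metadata (not marked BTRFS_DEV_STATE_IN_FS_METADATA),
 * closing their block devices, and track the device with the largest
 * generation in @latest_dev.
 */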
static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      int step, struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, step, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, step, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

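/*
 * Sync and invalidate a writeable device's page cache before dropping the
 * reference on its block device.
 */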
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

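/*
 * Close a single device and reset its state so that it looks freshly scanned.
 * The btrfs_device struct itself is kept so the device can be reopened later.
 */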
btrfs_close_one_device(struct btrfs_device * device)1154*4882a593Smuzhiyun static void btrfs_close_one_device(struct btrfs_device *device)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = device->fs_devices;
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1159*4882a593Smuzhiyun device->devid != BTRFS_DEV_REPLACE_DEVID) {
1160*4882a593Smuzhiyun list_del_init(&device->dev_alloc_list);
1161*4882a593Smuzhiyun fs_devices->rw_devices--;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1165*4882a593Smuzhiyun clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1168*4882a593Smuzhiyun clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1169*4882a593Smuzhiyun fs_devices->missing_devices--;
1170*4882a593Smuzhiyun }
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun btrfs_close_bdev(device);
1173*4882a593Smuzhiyun if (device->bdev) {
1174*4882a593Smuzhiyun fs_devices->open_devices--;
1175*4882a593Smuzhiyun device->bdev = NULL;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun device->fs_info = NULL;
1180*4882a593Smuzhiyun atomic_set(&device->dev_stats_ccnt, 0);
1181*4882a593Smuzhiyun extent_io_tree_release(&device->alloc_state);
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun /*
1184*4882a593Smuzhiyun * Reset the flush error record. We might have a transient flush error
1185*4882a593Smuzhiyun * in this mount, and if so we aborted the current transaction and set
1186*4882a593Smuzhiyun * the fs to an error state, guaranteeing no super blocks can be further
1187*4882a593Smuzhiyun * committed. However that error might be transient and if we unmount the
1188*4882a593Smuzhiyun * filesystem and mount it again, we should allow the mount to succeed
1189*4882a593Smuzhiyun * (btrfs_check_rw_degradable() should not fail) - if after mounting the
1190*4882a593Smuzhiyun * filesystem again we still get flush errors, then we will again abort
1191*4882a593Smuzhiyun * any transaction and set the error state, guaranteeing no commits of
1192*4882a593Smuzhiyun * unsafe super blocks.
1193*4882a593Smuzhiyun */
1194*4882a593Smuzhiyun device->last_flush_error = 0;
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun /* Verify the device is back in a pristine state */
1197*4882a593Smuzhiyun ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1198*4882a593Smuzhiyun ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1199*4882a593Smuzhiyun ASSERT(list_empty(&device->dev_alloc_list));
1200*4882a593Smuzhiyun ASSERT(list_empty(&device->post_commit_list));
1201*4882a593Smuzhiyun ASSERT(atomic_read(&device->reada_in_flight) == 0);
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun
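/*
 * Drop one open reference on @fs_devices. Once the last reference is gone,
 * close every member device and reset the fs_devices state.
 */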
1204*4882a593Smuzhiyun static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun struct btrfs_device *device, *tmp;
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun lockdep_assert_held(&uuid_mutex);
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun if (--fs_devices->opened > 0)
1211*4882a593Smuzhiyun return;
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1214*4882a593Smuzhiyun btrfs_close_one_device(device);
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun WARN_ON(fs_devices->open_devices);
1217*4882a593Smuzhiyun WARN_ON(fs_devices->rw_devices);
1218*4882a593Smuzhiyun fs_devices->opened = 0;
1219*4882a593Smuzhiyun fs_devices->seeding = false;
1220*4882a593Smuzhiyun fs_devices->fs_info = NULL;
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun
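/*
 * Close @fs_devices and, once it is fully closed, also close and free any
 * seed fs_devices hanging off of it.
 */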
1223*4882a593Smuzhiyun void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun LIST_HEAD(list);
1226*4882a593Smuzhiyun struct btrfs_fs_devices *tmp;
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun mutex_lock(&uuid_mutex);
1229*4882a593Smuzhiyun close_fs_devices(fs_devices);
1230*4882a593Smuzhiyun if (!fs_devices->opened)
1231*4882a593Smuzhiyun list_splice_init(&fs_devices->seed_list, &list);
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1234*4882a593Smuzhiyun close_fs_devices(fs_devices);
1235*4882a593Smuzhiyun list_del(&fs_devices->seed_list);
1236*4882a593Smuzhiyun free_fs_devices(fs_devices);
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun mutex_unlock(&uuid_mutex);
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun
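/*
 * Open every member device exclusively and remember the one with the highest
 * generation as the latest device. Devices whose superblock can no longer be
 * read (-ENODATA) are dropped from the list. Fails if no device could be
 * opened.
 */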
1241*4882a593Smuzhiyun static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1242*4882a593Smuzhiyun fmode_t flags, void *holder)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun struct btrfs_device *device;
1245*4882a593Smuzhiyun struct btrfs_device *latest_dev = NULL;
1246*4882a593Smuzhiyun struct btrfs_device *tmp_device;
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun flags |= FMODE_EXCL;
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1251*4882a593Smuzhiyun dev_list) {
1252*4882a593Smuzhiyun int ret;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1255*4882a593Smuzhiyun if (ret == 0 &&
1256*4882a593Smuzhiyun (!latest_dev || device->generation > latest_dev->generation)) {
1257*4882a593Smuzhiyun latest_dev = device;
1258*4882a593Smuzhiyun } else if (ret == -ENODATA) {
1259*4882a593Smuzhiyun fs_devices->num_devices--;
1260*4882a593Smuzhiyun list_del(&device->dev_list);
1261*4882a593Smuzhiyun btrfs_free_device(device);
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun if (fs_devices->open_devices == 0)
1265*4882a593Smuzhiyun return -EINVAL;
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun fs_devices->opened = 1;
1268*4882a593Smuzhiyun fs_devices->latest_bdev = latest_dev->bdev;
1269*4882a593Smuzhiyun fs_devices->total_rw_bytes = 0;
1270*4882a593Smuzhiyun fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun return 0;
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun
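/* list_sort() comparator: order devices by ascending devid */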
1275*4882a593Smuzhiyun static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun struct btrfs_device *dev1, *dev2;
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun dev1 = list_entry(a, struct btrfs_device, dev_list);
1280*4882a593Smuzhiyun dev2 = list_entry(b, struct btrfs_device, dev_list);
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun if (dev1->devid < dev2->devid)
1283*4882a593Smuzhiyun return -1;
1284*4882a593Smuzhiyun else if (dev1->devid > dev2->devid)
1285*4882a593Smuzhiyun return 1;
1286*4882a593Smuzhiyun return 0;
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1290*4882a593Smuzhiyun fmode_t flags, void *holder)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun int ret;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun lockdep_assert_held(&uuid_mutex);
1295*4882a593Smuzhiyun /*
1296*4882a593Smuzhiyun * The device_list_mutex must not be taken here because opening the
1297*4882a593Smuzhiyun * underlying device may take further locks, like bd_mutex.
1298*4882a593Smuzhiyun *
1299*4882a593Smuzhiyun * We also don't need the lock here as this is called during mount and
1300*4882a593Smuzhiyun * exclusion is provided by uuid_mutex.
1301*4882a593Smuzhiyun */
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun if (fs_devices->opened) {
1304*4882a593Smuzhiyun fs_devices->opened++;
1305*4882a593Smuzhiyun ret = 0;
1306*4882a593Smuzhiyun } else {
1307*4882a593Smuzhiyun list_sort(NULL, &fs_devices->devices, devid_cmp);
1308*4882a593Smuzhiyun ret = open_fs_devices(fs_devices, flags, holder);
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun return ret;
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun void btrfs_release_disk_super(struct btrfs_super_block *super)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun struct page *page = virt_to_page(super);
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun put_page(page);
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
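/*
 * Read the superblock at @bytenr through the page cache and do basic sanity
 * checks: it must fit on the device, fit within a single page and carry the
 * btrfs magic at the expected offset. On success the caller must drop the
 * page reference with btrfs_release_disk_super().
 */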
1321*4882a593Smuzhiyun static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1322*4882a593Smuzhiyun u64 bytenr)
1323*4882a593Smuzhiyun {
1324*4882a593Smuzhiyun struct btrfs_super_block *disk_super;
1325*4882a593Smuzhiyun struct page *page;
1326*4882a593Smuzhiyun void *p;
1327*4882a593Smuzhiyun pgoff_t index;
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun /* make sure our super fits in the device */
1330*4882a593Smuzhiyun if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1331*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun /* make sure our super fits in the page */
1334*4882a593Smuzhiyun if (sizeof(*disk_super) > PAGE_SIZE)
1335*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun /* make sure our super doesn't straddle pages on disk */
1338*4882a593Smuzhiyun index = bytenr >> PAGE_SHIFT;
1339*4882a593Smuzhiyun if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1340*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun /* pull in the page with our super */
1343*4882a593Smuzhiyun page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun if (IS_ERR(page))
1346*4882a593Smuzhiyun return ERR_CAST(page);
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun p = page_address(page);
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun /* align our pointer to the offset of the super block */
1351*4882a593Smuzhiyun disk_super = p + offset_in_page(bytenr);
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun if (btrfs_super_bytenr(disk_super) != bytenr ||
1354*4882a593Smuzhiyun btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1355*4882a593Smuzhiyun btrfs_release_disk_super(p);
1356*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1360*4882a593Smuzhiyun disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun return disk_super;
1363*4882a593Smuzhiyun }
1364*4882a593Smuzhiyun
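/*
 * Forget stale (scanned but not mounted) devices matching @path, or all such
 * devices when @path is empty.
 */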
1365*4882a593Smuzhiyun int btrfs_forget_devices(const char *path)
1366*4882a593Smuzhiyun {
1367*4882a593Smuzhiyun int ret;
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun mutex_lock(&uuid_mutex);
1370*4882a593Smuzhiyun ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1371*4882a593Smuzhiyun mutex_unlock(&uuid_mutex);
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun return ret;
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun /*
1377*4882a593Smuzhiyun * Look for a btrfs signature on a device. This may be called out of the mount path
1378*4882a593Smuzhiyun * and we are not allowed to call set_blocksize during the scan. The superblock
1379*4882a593Smuzhiyun * is read via the page cache.
1380*4882a593Smuzhiyun */
1381*4882a593Smuzhiyun struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1382*4882a593Smuzhiyun void *holder)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun struct btrfs_super_block *disk_super;
1385*4882a593Smuzhiyun bool new_device_added = false;
1386*4882a593Smuzhiyun struct btrfs_device *device = NULL;
1387*4882a593Smuzhiyun struct block_device *bdev;
1388*4882a593Smuzhiyun u64 bytenr;
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun lockdep_assert_held(&uuid_mutex);
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun /*
1393*4882a593Smuzhiyun * we would like to check all the supers, but that would make
1394*4882a593Smuzhiyun * a btrfs mount succeed after a mkfs from a different FS.
1395*4882a593Smuzhiyun * So, we need to add a special mount option to scan for
1396*4882a593Smuzhiyun * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1397*4882a593Smuzhiyun */
1398*4882a593Smuzhiyun bytenr = btrfs_sb_offset(0);
1399*4882a593Smuzhiyun flags |= FMODE_EXCL;
1400*4882a593Smuzhiyun
1401*4882a593Smuzhiyun bdev = blkdev_get_by_path(path, flags, holder);
1402*4882a593Smuzhiyun if (IS_ERR(bdev))
1403*4882a593Smuzhiyun return ERR_CAST(bdev);
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun disk_super = btrfs_read_disk_super(bdev, bytenr);
1406*4882a593Smuzhiyun if (IS_ERR(disk_super)) {
1407*4882a593Smuzhiyun device = ERR_CAST(disk_super);
1408*4882a593Smuzhiyun goto error_bdev_put;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun device = device_list_add(path, disk_super, &new_device_added);
1412*4882a593Smuzhiyun if (!IS_ERR(device)) {
1413*4882a593Smuzhiyun if (new_device_added)
1414*4882a593Smuzhiyun btrfs_free_stale_devices(path, device);
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun btrfs_release_disk_super(disk_super);
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun error_bdev_put:
1420*4882a593Smuzhiyun blkdev_put(bdev, flags);
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun return device;
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun /*
1426*4882a593Smuzhiyun * Try to find a chunk that intersects the [start, start + len] range and, when
1427*4882a593Smuzhiyun * one such chunk is found, record the end of it in *start.
1428*4882a593Smuzhiyun */
1429*4882a593Smuzhiyun static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1430*4882a593Smuzhiyun u64 len)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun u64 physical_start, physical_end;
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun lockdep_assert_held(&device->fs_info->chunk_mutex);
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun if (!find_first_extent_bit(&device->alloc_state, *start,
1437*4882a593Smuzhiyun &physical_start, &physical_end,
1438*4882a593Smuzhiyun CHUNK_ALLOCATED, NULL)) {
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun if (in_range(physical_start, *start, len) ||
1441*4882a593Smuzhiyun in_range(*start, physical_start,
1442*4882a593Smuzhiyun physical_end - physical_start)) {
1443*4882a593Smuzhiyun *start = physical_end + 1;
1444*4882a593Smuzhiyun return true;
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun return false;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun switch (device->fs_devices->chunk_alloc_policy) {
1453*4882a593Smuzhiyun case BTRFS_CHUNK_ALLOC_REGULAR:
1454*4882a593Smuzhiyun /*
1455*4882a593Smuzhiyun * We don't want to overwrite the superblock on the drive nor
1456*4882a593Smuzhiyun * any area used by the boot loader (grub for example), so we
1457*4882a593Smuzhiyun * make sure to start at an offset of at least 1MB.
1458*4882a593Smuzhiyun */
1459*4882a593Smuzhiyun return max_t(u64, start, SZ_1M);
1460*4882a593Smuzhiyun default:
1461*4882a593Smuzhiyun BUG();
1462*4882a593Smuzhiyun }
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun
1465*4882a593Smuzhiyun /**
1466*4882a593Smuzhiyun * dev_extent_hole_check - check if specified hole is suitable for allocation
1467*4882a593Smuzhiyun * @device: the device which we have the hole
1468*4882a593Smuzhiyun * @hole_start: starting position of the hole
1469*4882a593Smuzhiyun * @hole_size: the size of the hole
1470*4882a593Smuzhiyun * @num_bytes: the size of the free space that we need
1471*4882a593Smuzhiyun *
1472*4882a593Smuzhiyun * This function may modify @hole_start and @hole_size to reflect the suitable
1473*4882a593Smuzhiyun * position for allocation. Returns true if the hole position was updated, false otherwise.
1474*4882a593Smuzhiyun */
1475*4882a593Smuzhiyun static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1476*4882a593Smuzhiyun u64 *hole_size, u64 num_bytes)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun bool changed = false;
1479*4882a593Smuzhiyun u64 hole_end = *hole_start + *hole_size;
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun /*
1482*4882a593Smuzhiyun * Check before we set max_hole_start, otherwise we could end up
1483*4882a593Smuzhiyun * sending back this offset anyway.
1484*4882a593Smuzhiyun */
1485*4882a593Smuzhiyun if (contains_pending_extent(device, hole_start, *hole_size)) {
1486*4882a593Smuzhiyun if (hole_end >= *hole_start)
1487*4882a593Smuzhiyun *hole_size = hole_end - *hole_start;
1488*4882a593Smuzhiyun else
1489*4882a593Smuzhiyun *hole_size = 0;
1490*4882a593Smuzhiyun changed = true;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun switch (device->fs_devices->chunk_alloc_policy) {
1494*4882a593Smuzhiyun case BTRFS_CHUNK_ALLOC_REGULAR:
1495*4882a593Smuzhiyun /* No extra check */
1496*4882a593Smuzhiyun break;
1497*4882a593Smuzhiyun default:
1498*4882a593Smuzhiyun BUG();
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun return changed;
1502*4882a593Smuzhiyun }
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun /*
1505*4882a593Smuzhiyun * find_free_dev_extent_start - find free space in the specified device
1506*4882a593Smuzhiyun * @device: the device which we search the free space in
1507*4882a593Smuzhiyun * @num_bytes: the size of the free space that we need
1508*4882a593Smuzhiyun * @search_start: the position from which to begin the search
1509*4882a593Smuzhiyun * @start: store the start of the free space.
1510*4882a593Smuzhiyun * @len: the size of the free space that we find, or the size
1511*4882a593Smuzhiyun * of the max free space if we don't find suitable free space
1512*4882a593Smuzhiyun *
1513*4882a593Smuzhiyun * this uses a pretty simple search, the expectation is that it is
1514*4882a593Smuzhiyun * called very infrequently and that a given device has a small number
1515*4882a593Smuzhiyun * of extents
1516*4882a593Smuzhiyun *
1517*4882a593Smuzhiyun * @start is used to store the start of the free space if we find it. But if we
1518*4882a593Smuzhiyun * don't find suitable free space, it will be used to store the start position
1519*4882a593Smuzhiyun * of the max free space.
1520*4882a593Smuzhiyun *
1521*4882a593Smuzhiyun * @len is used to store the size of the free space that we find.
1522*4882a593Smuzhiyun * But if we don't find suitable free space, it is used to store the size of
1523*4882a593Smuzhiyun * the max free space.
1524*4882a593Smuzhiyun *
1525*4882a593Smuzhiyun * NOTE: This function will search *commit* root of device tree, and does extra
1526*4882a593Smuzhiyun * check to ensure dev extents are not double allocated.
1527*4882a593Smuzhiyun * This makes the function safe to allocate dev extents but may not report
1528*4882a593Smuzhiyun * correct usable device space, as device extents freed in the current transaction
1529*4882a593Smuzhiyun * are not reported as available.
1530*4882a593Smuzhiyun */
1531*4882a593Smuzhiyun static int find_free_dev_extent_start(struct btrfs_device *device,
1532*4882a593Smuzhiyun u64 num_bytes, u64 search_start, u64 *start,
1533*4882a593Smuzhiyun u64 *len)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = device->fs_info;
1536*4882a593Smuzhiyun struct btrfs_root *root = fs_info->dev_root;
1537*4882a593Smuzhiyun struct btrfs_key key;
1538*4882a593Smuzhiyun struct btrfs_dev_extent *dev_extent;
1539*4882a593Smuzhiyun struct btrfs_path *path;
1540*4882a593Smuzhiyun u64 hole_size;
1541*4882a593Smuzhiyun u64 max_hole_start;
1542*4882a593Smuzhiyun u64 max_hole_size;
1543*4882a593Smuzhiyun u64 extent_end;
1544*4882a593Smuzhiyun u64 search_end = device->total_bytes;
1545*4882a593Smuzhiyun int ret;
1546*4882a593Smuzhiyun int slot;
1547*4882a593Smuzhiyun struct extent_buffer *l;
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun search_start = dev_extent_search_start(device, search_start);
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun path = btrfs_alloc_path();
1552*4882a593Smuzhiyun if (!path)
1553*4882a593Smuzhiyun return -ENOMEM;
1554*4882a593Smuzhiyun
1555*4882a593Smuzhiyun max_hole_start = search_start;
1556*4882a593Smuzhiyun max_hole_size = 0;
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun again:
1559*4882a593Smuzhiyun if (search_start >= search_end ||
1560*4882a593Smuzhiyun test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1561*4882a593Smuzhiyun ret = -ENOSPC;
1562*4882a593Smuzhiyun goto out;
1563*4882a593Smuzhiyun }
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun path->reada = READA_FORWARD;
1566*4882a593Smuzhiyun path->search_commit_root = 1;
1567*4882a593Smuzhiyun path->skip_locking = 1;
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun key.objectid = device->devid;
1570*4882a593Smuzhiyun key.offset = search_start;
1571*4882a593Smuzhiyun key.type = BTRFS_DEV_EXTENT_KEY;
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1574*4882a593Smuzhiyun if (ret < 0)
1575*4882a593Smuzhiyun goto out;
1576*4882a593Smuzhiyun if (ret > 0) {
1577*4882a593Smuzhiyun ret = btrfs_previous_item(root, path, key.objectid, key.type);
1578*4882a593Smuzhiyun if (ret < 0)
1579*4882a593Smuzhiyun goto out;
1580*4882a593Smuzhiyun }
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun while (1) {
1583*4882a593Smuzhiyun l = path->nodes[0];
1584*4882a593Smuzhiyun slot = path->slots[0];
1585*4882a593Smuzhiyun if (slot >= btrfs_header_nritems(l)) {
1586*4882a593Smuzhiyun ret = btrfs_next_leaf(root, path);
1587*4882a593Smuzhiyun if (ret == 0)
1588*4882a593Smuzhiyun continue;
1589*4882a593Smuzhiyun if (ret < 0)
1590*4882a593Smuzhiyun goto out;
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun break;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun btrfs_item_key_to_cpu(l, &key, slot);
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun if (key.objectid < device->devid)
1597*4882a593Smuzhiyun goto next;
1598*4882a593Smuzhiyun
1599*4882a593Smuzhiyun if (key.objectid > device->devid)
1600*4882a593Smuzhiyun break;
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun if (key.type != BTRFS_DEV_EXTENT_KEY)
1603*4882a593Smuzhiyun goto next;
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun if (key.offset > search_start) {
1606*4882a593Smuzhiyun hole_size = key.offset - search_start;
1607*4882a593Smuzhiyun dev_extent_hole_check(device, &search_start, &hole_size,
1608*4882a593Smuzhiyun num_bytes);
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun if (hole_size > max_hole_size) {
1611*4882a593Smuzhiyun max_hole_start = search_start;
1612*4882a593Smuzhiyun max_hole_size = hole_size;
1613*4882a593Smuzhiyun }
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun /*
1616*4882a593Smuzhiyun * If this free space is greater than what we need,
1617*4882a593Smuzhiyun * it must be the max free space that we have found
1618*4882a593Smuzhiyun * until now, so max_hole_start must point to the start
1619*4882a593Smuzhiyun * of this free space and the length of this free space
1620*4882a593Smuzhiyun * is stored in max_hole_size. Thus, we return
1621*4882a593Smuzhiyun * max_hole_start and max_hole_size and go back to the
1622*4882a593Smuzhiyun * caller.
1623*4882a593Smuzhiyun */
1624*4882a593Smuzhiyun if (hole_size >= num_bytes) {
1625*4882a593Smuzhiyun ret = 0;
1626*4882a593Smuzhiyun goto out;
1627*4882a593Smuzhiyun }
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1631*4882a593Smuzhiyun extent_end = key.offset + btrfs_dev_extent_length(l,
1632*4882a593Smuzhiyun dev_extent);
1633*4882a593Smuzhiyun if (extent_end > search_start)
1634*4882a593Smuzhiyun search_start = extent_end;
1635*4882a593Smuzhiyun next:
1636*4882a593Smuzhiyun path->slots[0]++;
1637*4882a593Smuzhiyun cond_resched();
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun /*
1641*4882a593Smuzhiyun * At this point, search_start should be the end of
1642*4882a593Smuzhiyun * allocated dev extents, and when shrinking the device,
1643*4882a593Smuzhiyun * search_end may be smaller than search_start.
1644*4882a593Smuzhiyun */
1645*4882a593Smuzhiyun if (search_end > search_start) {
1646*4882a593Smuzhiyun hole_size = search_end - search_start;
1647*4882a593Smuzhiyun if (dev_extent_hole_check(device, &search_start, &hole_size,
1648*4882a593Smuzhiyun num_bytes)) {
1649*4882a593Smuzhiyun btrfs_release_path(path);
1650*4882a593Smuzhiyun goto again;
1651*4882a593Smuzhiyun }
1652*4882a593Smuzhiyun
1653*4882a593Smuzhiyun if (hole_size > max_hole_size) {
1654*4882a593Smuzhiyun max_hole_start = search_start;
1655*4882a593Smuzhiyun max_hole_size = hole_size;
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun }
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun /* See above. */
1660*4882a593Smuzhiyun if (max_hole_size < num_bytes)
1661*4882a593Smuzhiyun ret = -ENOSPC;
1662*4882a593Smuzhiyun else
1663*4882a593Smuzhiyun ret = 0;
1664*4882a593Smuzhiyun
1665*4882a593Smuzhiyun out:
1666*4882a593Smuzhiyun btrfs_free_path(path);
1667*4882a593Smuzhiyun *start = max_hole_start;
1668*4882a593Smuzhiyun if (len)
1669*4882a593Smuzhiyun *len = max_hole_size;
1670*4882a593Smuzhiyun return ret;
1671*4882a593Smuzhiyun }
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1674*4882a593Smuzhiyun u64 *start, u64 *len)
1675*4882a593Smuzhiyun {
1676*4882a593Smuzhiyun /* FIXME use last free of some kind */
1677*4882a593Smuzhiyun return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1678*4882a593Smuzhiyun }
1679*4882a593Smuzhiyun
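/*
 * Find the dev extent item covering @start on @device, return its length in
 * @dev_extent_len and delete the item from the device tree.
 */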
1680*4882a593Smuzhiyun static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1681*4882a593Smuzhiyun struct btrfs_device *device,
1682*4882a593Smuzhiyun u64 start, u64 *dev_extent_len)
1683*4882a593Smuzhiyun {
1684*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = device->fs_info;
1685*4882a593Smuzhiyun struct btrfs_root *root = fs_info->dev_root;
1686*4882a593Smuzhiyun int ret;
1687*4882a593Smuzhiyun struct btrfs_path *path;
1688*4882a593Smuzhiyun struct btrfs_key key;
1689*4882a593Smuzhiyun struct btrfs_key found_key;
1690*4882a593Smuzhiyun struct extent_buffer *leaf = NULL;
1691*4882a593Smuzhiyun struct btrfs_dev_extent *extent = NULL;
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun path = btrfs_alloc_path();
1694*4882a593Smuzhiyun if (!path)
1695*4882a593Smuzhiyun return -ENOMEM;
1696*4882a593Smuzhiyun
1697*4882a593Smuzhiyun key.objectid = device->devid;
1698*4882a593Smuzhiyun key.offset = start;
1699*4882a593Smuzhiyun key.type = BTRFS_DEV_EXTENT_KEY;
1700*4882a593Smuzhiyun again:
1701*4882a593Smuzhiyun ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1702*4882a593Smuzhiyun if (ret > 0) {
1703*4882a593Smuzhiyun ret = btrfs_previous_item(root, path, key.objectid,
1704*4882a593Smuzhiyun BTRFS_DEV_EXTENT_KEY);
1705*4882a593Smuzhiyun if (ret)
1706*4882a593Smuzhiyun goto out;
1707*4882a593Smuzhiyun leaf = path->nodes[0];
1708*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1709*4882a593Smuzhiyun extent = btrfs_item_ptr(leaf, path->slots[0],
1710*4882a593Smuzhiyun struct btrfs_dev_extent);
1711*4882a593Smuzhiyun BUG_ON(found_key.offset > start || found_key.offset +
1712*4882a593Smuzhiyun btrfs_dev_extent_length(leaf, extent) < start);
1713*4882a593Smuzhiyun key = found_key;
1714*4882a593Smuzhiyun btrfs_release_path(path);
1715*4882a593Smuzhiyun goto again;
1716*4882a593Smuzhiyun } else if (ret == 0) {
1717*4882a593Smuzhiyun leaf = path->nodes[0];
1718*4882a593Smuzhiyun extent = btrfs_item_ptr(leaf, path->slots[0],
1719*4882a593Smuzhiyun struct btrfs_dev_extent);
1720*4882a593Smuzhiyun } else {
1721*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1722*4882a593Smuzhiyun goto out;
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun ret = btrfs_del_item(trans, root, path);
1728*4882a593Smuzhiyun if (ret) {
1729*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, ret,
1730*4882a593Smuzhiyun "Failed to remove dev extent item");
1731*4882a593Smuzhiyun } else {
1732*4882a593Smuzhiyun set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1733*4882a593Smuzhiyun }
1734*4882a593Smuzhiyun out:
1735*4882a593Smuzhiyun btrfs_free_path(path);
1736*4882a593Smuzhiyun return ret;
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun
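/*
 * Insert a dev extent item for @device covering @num_bytes at @start and
 * point it back at the chunk at @chunk_offset in the chunk tree.
 */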
1739*4882a593Smuzhiyun static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1740*4882a593Smuzhiyun struct btrfs_device *device,
1741*4882a593Smuzhiyun u64 chunk_offset, u64 start, u64 num_bytes)
1742*4882a593Smuzhiyun {
1743*4882a593Smuzhiyun int ret;
1744*4882a593Smuzhiyun struct btrfs_path *path;
1745*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = device->fs_info;
1746*4882a593Smuzhiyun struct btrfs_root *root = fs_info->dev_root;
1747*4882a593Smuzhiyun struct btrfs_dev_extent *extent;
1748*4882a593Smuzhiyun struct extent_buffer *leaf;
1749*4882a593Smuzhiyun struct btrfs_key key;
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1752*4882a593Smuzhiyun WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1753*4882a593Smuzhiyun path = btrfs_alloc_path();
1754*4882a593Smuzhiyun if (!path)
1755*4882a593Smuzhiyun return -ENOMEM;
1756*4882a593Smuzhiyun
1757*4882a593Smuzhiyun key.objectid = device->devid;
1758*4882a593Smuzhiyun key.offset = start;
1759*4882a593Smuzhiyun key.type = BTRFS_DEV_EXTENT_KEY;
1760*4882a593Smuzhiyun ret = btrfs_insert_empty_item(trans, root, path, &key,
1761*4882a593Smuzhiyun sizeof(*extent));
1762*4882a593Smuzhiyun if (ret)
1763*4882a593Smuzhiyun goto out;
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun leaf = path->nodes[0];
1766*4882a593Smuzhiyun extent = btrfs_item_ptr(leaf, path->slots[0],
1767*4882a593Smuzhiyun struct btrfs_dev_extent);
1768*4882a593Smuzhiyun btrfs_set_dev_extent_chunk_tree(leaf, extent,
1769*4882a593Smuzhiyun BTRFS_CHUNK_TREE_OBJECTID);
1770*4882a593Smuzhiyun btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1771*4882a593Smuzhiyun BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1772*4882a593Smuzhiyun btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1775*4882a593Smuzhiyun btrfs_mark_buffer_dirty(leaf);
1776*4882a593Smuzhiyun out:
1777*4882a593Smuzhiyun btrfs_free_path(path);
1778*4882a593Smuzhiyun return ret;
1779*4882a593Smuzhiyun }
1780*4882a593Smuzhiyun
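/*
 * Return the first logical address past the last mapped chunk, i.e. the
 * offset at which the next chunk can be placed.
 */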
1781*4882a593Smuzhiyun static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun struct extent_map_tree *em_tree;
1784*4882a593Smuzhiyun struct extent_map *em;
1785*4882a593Smuzhiyun struct rb_node *n;
1786*4882a593Smuzhiyun u64 ret = 0;
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun em_tree = &fs_info->mapping_tree;
1789*4882a593Smuzhiyun read_lock(&em_tree->lock);
1790*4882a593Smuzhiyun n = rb_last(&em_tree->map.rb_root);
1791*4882a593Smuzhiyun if (n) {
1792*4882a593Smuzhiyun em = rb_entry(n, struct extent_map, rb_node);
1793*4882a593Smuzhiyun ret = em->start + em->len;
1794*4882a593Smuzhiyun }
1795*4882a593Smuzhiyun read_unlock(&em_tree->lock);
1796*4882a593Smuzhiyun
1797*4882a593Smuzhiyun return ret;
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun
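/*
 * Find the highest devid currently present in the chunk tree and store the
 * next free devid in @devid_ret.
 */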
1800*4882a593Smuzhiyun static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1801*4882a593Smuzhiyun u64 *devid_ret)
1802*4882a593Smuzhiyun {
1803*4882a593Smuzhiyun int ret;
1804*4882a593Smuzhiyun struct btrfs_key key;
1805*4882a593Smuzhiyun struct btrfs_key found_key;
1806*4882a593Smuzhiyun struct btrfs_path *path;
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun path = btrfs_alloc_path();
1809*4882a593Smuzhiyun if (!path)
1810*4882a593Smuzhiyun return -ENOMEM;
1811*4882a593Smuzhiyun
1812*4882a593Smuzhiyun key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1813*4882a593Smuzhiyun key.type = BTRFS_DEV_ITEM_KEY;
1814*4882a593Smuzhiyun key.offset = (u64)-1;
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1817*4882a593Smuzhiyun if (ret < 0)
1818*4882a593Smuzhiyun goto error;
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun if (ret == 0) {
1821*4882a593Smuzhiyun /* Corruption */
1822*4882a593Smuzhiyun btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1823*4882a593Smuzhiyun ret = -EUCLEAN;
1824*4882a593Smuzhiyun goto error;
1825*4882a593Smuzhiyun }
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun ret = btrfs_previous_item(fs_info->chunk_root, path,
1828*4882a593Smuzhiyun BTRFS_DEV_ITEMS_OBJECTID,
1829*4882a593Smuzhiyun BTRFS_DEV_ITEM_KEY);
1830*4882a593Smuzhiyun if (ret) {
1831*4882a593Smuzhiyun *devid_ret = 1;
1832*4882a593Smuzhiyun } else {
1833*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1834*4882a593Smuzhiyun path->slots[0]);
1835*4882a593Smuzhiyun *devid_ret = found_key.offset + 1;
1836*4882a593Smuzhiyun }
1837*4882a593Smuzhiyun ret = 0;
1838*4882a593Smuzhiyun error:
1839*4882a593Smuzhiyun btrfs_free_path(path);
1840*4882a593Smuzhiyun return ret;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun /*
1844*4882a593Smuzhiyun * The device information is stored in the chunk root.
1845*4882a593Smuzhiyun * The btrfs_device struct should be fully filled in.
1846*4882a593Smuzhiyun */
1847*4882a593Smuzhiyun static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1848*4882a593Smuzhiyun struct btrfs_device *device)
1849*4882a593Smuzhiyun {
1850*4882a593Smuzhiyun int ret;
1851*4882a593Smuzhiyun struct btrfs_path *path;
1852*4882a593Smuzhiyun struct btrfs_dev_item *dev_item;
1853*4882a593Smuzhiyun struct extent_buffer *leaf;
1854*4882a593Smuzhiyun struct btrfs_key key;
1855*4882a593Smuzhiyun unsigned long ptr;
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun path = btrfs_alloc_path();
1858*4882a593Smuzhiyun if (!path)
1859*4882a593Smuzhiyun return -ENOMEM;
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1862*4882a593Smuzhiyun key.type = BTRFS_DEV_ITEM_KEY;
1863*4882a593Smuzhiyun key.offset = device->devid;
1864*4882a593Smuzhiyun
1865*4882a593Smuzhiyun ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1866*4882a593Smuzhiyun &key, sizeof(*dev_item));
1867*4882a593Smuzhiyun if (ret)
1868*4882a593Smuzhiyun goto out;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun leaf = path->nodes[0];
1871*4882a593Smuzhiyun dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun btrfs_set_device_id(leaf, dev_item, device->devid);
1874*4882a593Smuzhiyun btrfs_set_device_generation(leaf, dev_item, 0);
1875*4882a593Smuzhiyun btrfs_set_device_type(leaf, dev_item, device->type);
1876*4882a593Smuzhiyun btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1877*4882a593Smuzhiyun btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1878*4882a593Smuzhiyun btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1879*4882a593Smuzhiyun btrfs_set_device_total_bytes(leaf, dev_item,
1880*4882a593Smuzhiyun btrfs_device_get_disk_total_bytes(device));
1881*4882a593Smuzhiyun btrfs_set_device_bytes_used(leaf, dev_item,
1882*4882a593Smuzhiyun btrfs_device_get_bytes_used(device));
1883*4882a593Smuzhiyun btrfs_set_device_group(leaf, dev_item, 0);
1884*4882a593Smuzhiyun btrfs_set_device_seek_speed(leaf, dev_item, 0);
1885*4882a593Smuzhiyun btrfs_set_device_bandwidth(leaf, dev_item, 0);
1886*4882a593Smuzhiyun btrfs_set_device_start_offset(leaf, dev_item, 0);
1887*4882a593Smuzhiyun
1888*4882a593Smuzhiyun ptr = btrfs_device_uuid(dev_item);
1889*4882a593Smuzhiyun write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1890*4882a593Smuzhiyun ptr = btrfs_device_fsid(dev_item);
1891*4882a593Smuzhiyun write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1892*4882a593Smuzhiyun ptr, BTRFS_FSID_SIZE);
1893*4882a593Smuzhiyun btrfs_mark_buffer_dirty(leaf);
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun ret = 0;
1896*4882a593Smuzhiyun out:
1897*4882a593Smuzhiyun btrfs_free_path(path);
1898*4882a593Smuzhiyun return ret;
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun /*
1902*4882a593Smuzhiyun * Function to update ctime/mtime for a given device path.
1903*4882a593Smuzhiyun * Mainly used for ctime/mtime-based probing, e.g. by libblkid.
1904*4882a593Smuzhiyun *
1905*4882a593Smuzhiyun * We don't care about errors here, this is just to be kind to userspace.
1906*4882a593Smuzhiyun */
1907*4882a593Smuzhiyun static void update_dev_time(const char *device_path)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun struct path path;
1910*4882a593Smuzhiyun struct timespec64 now;
1911*4882a593Smuzhiyun int ret;
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1914*4882a593Smuzhiyun if (ret)
1915*4882a593Smuzhiyun return;
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun now = current_time(d_inode(path.dentry));
1918*4882a593Smuzhiyun inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1919*4882a593Smuzhiyun path_put(&path);
1920*4882a593Smuzhiyun }
1921*4882a593Smuzhiyun
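/* Delete the dev item for @device from the chunk tree in its own transaction */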
1922*4882a593Smuzhiyun static int btrfs_rm_dev_item(struct btrfs_device *device)
1923*4882a593Smuzhiyun {
1924*4882a593Smuzhiyun struct btrfs_root *root = device->fs_info->chunk_root;
1925*4882a593Smuzhiyun int ret;
1926*4882a593Smuzhiyun struct btrfs_path *path;
1927*4882a593Smuzhiyun struct btrfs_key key;
1928*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
1929*4882a593Smuzhiyun
1930*4882a593Smuzhiyun path = btrfs_alloc_path();
1931*4882a593Smuzhiyun if (!path)
1932*4882a593Smuzhiyun return -ENOMEM;
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun trans = btrfs_start_transaction(root, 0);
1935*4882a593Smuzhiyun if (IS_ERR(trans)) {
1936*4882a593Smuzhiyun btrfs_free_path(path);
1937*4882a593Smuzhiyun return PTR_ERR(trans);
1938*4882a593Smuzhiyun }
1939*4882a593Smuzhiyun key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1940*4882a593Smuzhiyun key.type = BTRFS_DEV_ITEM_KEY;
1941*4882a593Smuzhiyun key.offset = device->devid;
1942*4882a593Smuzhiyun
1943*4882a593Smuzhiyun ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1944*4882a593Smuzhiyun if (ret) {
1945*4882a593Smuzhiyun if (ret > 0)
1946*4882a593Smuzhiyun ret = -ENOENT;
1947*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1948*4882a593Smuzhiyun btrfs_end_transaction(trans);
1949*4882a593Smuzhiyun goto out;
1950*4882a593Smuzhiyun }
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun ret = btrfs_del_item(trans, root, path);
1953*4882a593Smuzhiyun if (ret) {
1954*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1955*4882a593Smuzhiyun btrfs_end_transaction(trans);
1956*4882a593Smuzhiyun }
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun out:
1959*4882a593Smuzhiyun btrfs_free_path(path);
1960*4882a593Smuzhiyun if (!ret)
1961*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
1962*4882a593Smuzhiyun return ret;
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun /*
1966*4882a593Smuzhiyun * Verify that @num_devices satisfies the RAID profile constraints in the whole
1967*4882a593Smuzhiyun * filesystem. It's up to the caller to adjust that number regarding e.g. device
1968*4882a593Smuzhiyun * replace.
1969*4882a593Smuzhiyun */
1970*4882a593Smuzhiyun static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1971*4882a593Smuzhiyun u64 num_devices)
1972*4882a593Smuzhiyun {
1973*4882a593Smuzhiyun u64 all_avail;
1974*4882a593Smuzhiyun unsigned seq;
1975*4882a593Smuzhiyun int i;
1976*4882a593Smuzhiyun
1977*4882a593Smuzhiyun do {
1978*4882a593Smuzhiyun seq = read_seqbegin(&fs_info->profiles_lock);
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun all_avail = fs_info->avail_data_alloc_bits |
1981*4882a593Smuzhiyun fs_info->avail_system_alloc_bits |
1982*4882a593Smuzhiyun fs_info->avail_metadata_alloc_bits;
1983*4882a593Smuzhiyun } while (read_seqretry(&fs_info->profiles_lock, seq));
1984*4882a593Smuzhiyun
1985*4882a593Smuzhiyun for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1986*4882a593Smuzhiyun if (!(all_avail & btrfs_raid_array[i].bg_flag))
1987*4882a593Smuzhiyun continue;
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun if (num_devices < btrfs_raid_array[i].devs_min) {
1990*4882a593Smuzhiyun int ret = btrfs_raid_array[i].mindev_error;
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun if (ret)
1993*4882a593Smuzhiyun return ret;
1994*4882a593Smuzhiyun }
1995*4882a593Smuzhiyun }
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun return 0;
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun
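/*
 * Return any device from @fs_devs other than @device that is not missing and
 * has an open block device, or NULL if there is none.
 */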
2000*4882a593Smuzhiyun static struct btrfs_device * btrfs_find_next_active_device(
2001*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2002*4882a593Smuzhiyun {
2003*4882a593Smuzhiyun struct btrfs_device *next_device;
2004*4882a593Smuzhiyun
2005*4882a593Smuzhiyun list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2006*4882a593Smuzhiyun if (next_device != device &&
2007*4882a593Smuzhiyun !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2008*4882a593Smuzhiyun && next_device->bdev)
2009*4882a593Smuzhiyun return next_device;
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun
2012*4882a593Smuzhiyun return NULL;
2013*4882a593Smuzhiyun }
2014*4882a593Smuzhiyun
2015*4882a593Smuzhiyun /*
2016*4882a593Smuzhiyun * Helper function to check if the given device is part of s_bdev / latest_bdev
2017*4882a593Smuzhiyun * and replace it with the provided or the next active device. In the context
2018*4882a593Smuzhiyun * where this function is called, there should always be another active device
2019*4882a593Smuzhiyun * (or next_device) available.
2020*4882a593Smuzhiyun */
2021*4882a593Smuzhiyun void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2022*4882a593Smuzhiyun struct btrfs_device *next_device)
2023*4882a593Smuzhiyun {
2024*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = device->fs_info;
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun if (!next_device)
2027*4882a593Smuzhiyun next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2028*4882a593Smuzhiyun device);
2029*4882a593Smuzhiyun ASSERT(next_device);
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun if (fs_info->sb->s_bdev &&
2032*4882a593Smuzhiyun (fs_info->sb->s_bdev == device->bdev))
2033*4882a593Smuzhiyun fs_info->sb->s_bdev = next_device->bdev;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun if (fs_info->fs_devices->latest_bdev == device->bdev)
2036*4882a593Smuzhiyun fs_info->fs_devices->latest_bdev = next_device->bdev;
2037*4882a593Smuzhiyun }
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun /*
2040*4882a593Smuzhiyun * Return btrfs_fs_devices::num_devices excluding the device that's being
2041*4882a593Smuzhiyun * currently replaced.
2042*4882a593Smuzhiyun */
2043*4882a593Smuzhiyun static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2044*4882a593Smuzhiyun {
2045*4882a593Smuzhiyun u64 num_devices = fs_info->fs_devices->num_devices;
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun down_read(&fs_info->dev_replace.rwsem);
2048*4882a593Smuzhiyun if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2049*4882a593Smuzhiyun ASSERT(num_devices > 1);
2050*4882a593Smuzhiyun num_devices--;
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun up_read(&fs_info->dev_replace.rwsem);
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun return num_devices;
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun
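/*
 * Wipe the magic from every superblock copy on @bdev so the device is no
 * longer recognized as btrfs, then notify udev and refresh the timestamps of
 * @device_path for libblkid.
 */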
2057*4882a593Smuzhiyun void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2058*4882a593Smuzhiyun struct block_device *bdev,
2059*4882a593Smuzhiyun const char *device_path)
2060*4882a593Smuzhiyun {
2061*4882a593Smuzhiyun struct btrfs_super_block *disk_super;
2062*4882a593Smuzhiyun int copy_num;
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyun if (!bdev)
2065*4882a593Smuzhiyun return;
2066*4882a593Smuzhiyun
2067*4882a593Smuzhiyun for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2068*4882a593Smuzhiyun struct page *page;
2069*4882a593Smuzhiyun int ret;
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2072*4882a593Smuzhiyun if (IS_ERR(disk_super))
2073*4882a593Smuzhiyun continue;
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyun page = virt_to_page(disk_super);
2078*4882a593Smuzhiyun set_page_dirty(page);
2079*4882a593Smuzhiyun lock_page(page);
2080*4882a593Smuzhiyun /* write_one_page() unlocks the page */
2081*4882a593Smuzhiyun ret = write_one_page(page);
2082*4882a593Smuzhiyun if (ret)
2083*4882a593Smuzhiyun btrfs_warn(fs_info,
2084*4882a593Smuzhiyun "error clearing superblock number %d (%d)",
2085*4882a593Smuzhiyun copy_num, ret);
2086*4882a593Smuzhiyun btrfs_release_disk_super(disk_super);
2087*4882a593Smuzhiyun
2088*4882a593Smuzhiyun }
2089*4882a593Smuzhiyun
2090*4882a593Smuzhiyun /* Notify udev that device has changed */
2091*4882a593Smuzhiyun btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2092*4882a593Smuzhiyun
2093*4882a593Smuzhiyun /* Update ctime/mtime for device path for libblkid */
2094*4882a593Smuzhiyun update_dev_time(device_path);
2095*4882a593Smuzhiyun }
2096*4882a593Smuzhiyun
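/*
 * Remove the device given by @devid or @device_path from a mounted
 * filesystem: shrink it to zero, delete its dev item, unlink it from the
 * device lists and, if it was writeable, scratch its superblocks.
 */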
2097*4882a593Smuzhiyun int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2098*4882a593Smuzhiyun u64 devid)
2099*4882a593Smuzhiyun {
2100*4882a593Smuzhiyun struct btrfs_device *device;
2101*4882a593Smuzhiyun struct btrfs_fs_devices *cur_devices;
2102*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2103*4882a593Smuzhiyun u64 num_devices;
2104*4882a593Smuzhiyun int ret = 0;
2105*4882a593Smuzhiyun
2106*4882a593Smuzhiyun /*
2107*4882a593Smuzhiyun * The device list in fs_devices is accessed without locks (neither
2108*4882a593Smuzhiyun * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2109*4882a593Smuzhiyun * filesystem and another device rm cannot run.
2110*4882a593Smuzhiyun */
2111*4882a593Smuzhiyun num_devices = btrfs_num_devices(fs_info);
2112*4882a593Smuzhiyun
2113*4882a593Smuzhiyun ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2114*4882a593Smuzhiyun if (ret)
2115*4882a593Smuzhiyun goto out;
2116*4882a593Smuzhiyun
2117*4882a593Smuzhiyun device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2118*4882a593Smuzhiyun
2119*4882a593Smuzhiyun if (IS_ERR(device)) {
2120*4882a593Smuzhiyun if (PTR_ERR(device) == -ENOENT &&
2121*4882a593Smuzhiyun device_path && strcmp(device_path, "missing") == 0)
2122*4882a593Smuzhiyun ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2123*4882a593Smuzhiyun else
2124*4882a593Smuzhiyun ret = PTR_ERR(device);
2125*4882a593Smuzhiyun goto out;
2126*4882a593Smuzhiyun }
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun if (btrfs_pinned_by_swapfile(fs_info, device)) {
2129*4882a593Smuzhiyun btrfs_warn_in_rcu(fs_info,
2130*4882a593Smuzhiyun "cannot remove device %s (devid %llu) due to active swapfile",
2131*4882a593Smuzhiyun rcu_str_deref(device->name), device->devid);
2132*4882a593Smuzhiyun ret = -ETXTBSY;
2133*4882a593Smuzhiyun goto out;
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun
2136*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2137*4882a593Smuzhiyun ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2138*4882a593Smuzhiyun goto out;
2139*4882a593Smuzhiyun }
2140*4882a593Smuzhiyun
2141*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2142*4882a593Smuzhiyun fs_info->fs_devices->rw_devices == 1) {
2143*4882a593Smuzhiyun ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2144*4882a593Smuzhiyun goto out;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2148*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2149*4882a593Smuzhiyun list_del_init(&device->dev_alloc_list);
2150*4882a593Smuzhiyun device->fs_devices->rw_devices--;
2151*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2152*4882a593Smuzhiyun }
2153*4882a593Smuzhiyun
2154*4882a593Smuzhiyun ret = btrfs_shrink_device(device, 0);
2155*4882a593Smuzhiyun if (!ret)
2156*4882a593Smuzhiyun btrfs_reada_remove_dev(device);
2157*4882a593Smuzhiyun if (ret)
2158*4882a593Smuzhiyun goto error_undo;
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun /*
2161*4882a593Smuzhiyun * TODO: the superblock still includes this device in its num_devices
2162*4882a593Smuzhiyun * counter although write_all_supers() is not locked out. This
2163*4882a593Smuzhiyun * could give a filesystem state which requires a degraded mount.
2164*4882a593Smuzhiyun */
2165*4882a593Smuzhiyun ret = btrfs_rm_dev_item(device);
2166*4882a593Smuzhiyun if (ret)
2167*4882a593Smuzhiyun goto error_undo;
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2170*4882a593Smuzhiyun btrfs_scrub_cancel_dev(device);
2171*4882a593Smuzhiyun
2172*4882a593Smuzhiyun /*
2173*4882a593Smuzhiyun * the device list mutex makes sure that we don't change
2174*4882a593Smuzhiyun * the device list while someone else is writing out all
2175*4882a593Smuzhiyun * the device supers. Whoever is writing all supers, should
2176*4882a593Smuzhiyun * lock the device list mutex before getting the number of
2177*4882a593Smuzhiyun * devices in the super block (super_copy). Conversely,
2178*4882a593Smuzhiyun * whoever updates the number of devices in the super block
2179*4882a593Smuzhiyun * (super_copy) should hold the device list mutex.
2180*4882a593Smuzhiyun */
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun /*
2183*4882a593Smuzhiyun * In normal cases cur_devices == fs_devices. But when deleting
2184*4882a593Smuzhiyun * a seed device, cur_devices points to the seed's own fs_devices,
2185*4882a593Smuzhiyun * listed under fs_devices->seed_list.
2186*4882a593Smuzhiyun */
2187*4882a593Smuzhiyun cur_devices = device->fs_devices;
2188*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
2189*4882a593Smuzhiyun list_del_rcu(&device->dev_list);
2190*4882a593Smuzhiyun
2191*4882a593Smuzhiyun cur_devices->num_devices--;
2192*4882a593Smuzhiyun cur_devices->total_devices--;
2193*4882a593Smuzhiyun /* Update total_devices of the parent fs_devices if it's seed */
2194*4882a593Smuzhiyun if (cur_devices != fs_devices)
2195*4882a593Smuzhiyun fs_devices->total_devices--;
2196*4882a593Smuzhiyun
2197*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2198*4882a593Smuzhiyun cur_devices->missing_devices--;
2199*4882a593Smuzhiyun
2200*4882a593Smuzhiyun btrfs_assign_next_active_device(device, NULL);
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun if (device->bdev) {
2203*4882a593Smuzhiyun cur_devices->open_devices--;
2204*4882a593Smuzhiyun /* remove sysfs entry */
2205*4882a593Smuzhiyun btrfs_sysfs_remove_device(device);
2206*4882a593Smuzhiyun }
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2209*4882a593Smuzhiyun btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2210*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun /*
2213*4882a593Smuzhiyun * at this point, the device is zero sized and detached from
2214*4882a593Smuzhiyun * the devices list. All that's left is to zero out the old
2215*4882a593Smuzhiyun * supers and free the device.
2216*4882a593Smuzhiyun */
2217*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2218*4882a593Smuzhiyun btrfs_scratch_superblocks(fs_info, device->bdev,
2219*4882a593Smuzhiyun device->name->str);
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun btrfs_close_bdev(device);
2222*4882a593Smuzhiyun synchronize_rcu();
2223*4882a593Smuzhiyun btrfs_free_device(device);
2224*4882a593Smuzhiyun
2225*4882a593Smuzhiyun if (cur_devices->open_devices == 0) {
2226*4882a593Smuzhiyun list_del_init(&cur_devices->seed_list);
2227*4882a593Smuzhiyun close_fs_devices(cur_devices);
2228*4882a593Smuzhiyun free_fs_devices(cur_devices);
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun out:
2232*4882a593Smuzhiyun return ret;
2233*4882a593Smuzhiyun
2234*4882a593Smuzhiyun error_undo:
2235*4882a593Smuzhiyun btrfs_reada_undo_remove_dev(device);
2236*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2237*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2238*4882a593Smuzhiyun list_add(&device->dev_alloc_list,
2239*4882a593Smuzhiyun &fs_devices->alloc_list);
2240*4882a593Smuzhiyun device->fs_devices->rw_devices++;
2241*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2242*4882a593Smuzhiyun }
2243*4882a593Smuzhiyun goto out;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun
2246*4882a593Smuzhiyun void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2247*4882a593Smuzhiyun {
2248*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices;
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun /*
2253*4882a593Smuzhiyun * In case of an fs with no seed, srcdev->fs_devices will point
2254*4882a593Smuzhiyun * to the fs_devices of fs_info. However, when the device being replaced
2255*4882a593Smuzhiyun * is a seed device, it will point to the seed's local fs_devices. In short,
2256*4882a593Smuzhiyun * srcdev has its correct fs_devices in both cases.
2257*4882a593Smuzhiyun */
2258*4882a593Smuzhiyun fs_devices = srcdev->fs_devices;
2259*4882a593Smuzhiyun
2260*4882a593Smuzhiyun list_del_rcu(&srcdev->dev_list);
2261*4882a593Smuzhiyun list_del(&srcdev->dev_alloc_list);
2262*4882a593Smuzhiyun fs_devices->num_devices--;
2263*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2264*4882a593Smuzhiyun fs_devices->missing_devices--;
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2267*4882a593Smuzhiyun fs_devices->rw_devices--;
2268*4882a593Smuzhiyun
2269*4882a593Smuzhiyun if (srcdev->bdev)
2270*4882a593Smuzhiyun fs_devices->open_devices--;
2271*4882a593Smuzhiyun }
2272*4882a593Smuzhiyun
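/*
 * Close and free the replace source device. If it was the last device of a
 * seed fs_devices, tear that fs_devices down as well.
 */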
2273*4882a593Smuzhiyun void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2274*4882a593Smuzhiyun {
2275*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2276*4882a593Smuzhiyun
2277*4882a593Smuzhiyun mutex_lock(&uuid_mutex);
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun btrfs_close_bdev(srcdev);
2280*4882a593Smuzhiyun synchronize_rcu();
2281*4882a593Smuzhiyun btrfs_free_device(srcdev);
2282*4882a593Smuzhiyun
2283*4882a593Smuzhiyun /* If there are no devices left we'd rather delete the fs_devices */
2284*4882a593Smuzhiyun if (!fs_devices->num_devices) {
2285*4882a593Smuzhiyun /*
2286*4882a593Smuzhiyun * On a mounted FS, num_devices can't be zero unless it's a
2287*4882a593Smuzhiyun * seed. In case of a seed device being replaced, the replace
2288*4882a593Smuzhiyun * target is added to the sprout FS, so there will be no more
2289*4882a593Smuzhiyun * devices left under the seed FS.
2290*4882a593Smuzhiyun */
2291*4882a593Smuzhiyun ASSERT(fs_devices->seeding);
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun list_del_init(&fs_devices->seed_list);
2294*4882a593Smuzhiyun close_fs_devices(fs_devices);
2295*4882a593Smuzhiyun free_fs_devices(fs_devices);
2296*4882a593Smuzhiyun }
2297*4882a593Smuzhiyun mutex_unlock(&uuid_mutex);
2298*4882a593Smuzhiyun }
2299*4882a593Smuzhiyun
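/*
 * Remove the dev-replace target device from the device list, update the
 * device counters, scratch its superblocks and finally close and free it.
 */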
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2301*4882a593Smuzhiyun {
2302*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2303*4882a593Smuzhiyun
2304*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
2305*4882a593Smuzhiyun
2306*4882a593Smuzhiyun btrfs_sysfs_remove_device(tgtdev);
2307*4882a593Smuzhiyun
2308*4882a593Smuzhiyun if (tgtdev->bdev)
2309*4882a593Smuzhiyun fs_devices->open_devices--;
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun fs_devices->num_devices--;
2312*4882a593Smuzhiyun
2313*4882a593Smuzhiyun btrfs_assign_next_active_device(tgtdev, NULL);
2314*4882a593Smuzhiyun
2315*4882a593Smuzhiyun list_del_rcu(&tgtdev->dev_list);
2316*4882a593Smuzhiyun
2317*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
2318*4882a593Smuzhiyun
	/*
	 * The update_dev_time() within btrfs_scratch_superblocks() may lead
	 * to a call to btrfs_show_devname(), which tries to take
	 * device_list_mutex. This device is already off the device list
	 * here, so we don't have to hold device_list_mutex ourselves.
	 */
2326*4882a593Smuzhiyun btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2327*4882a593Smuzhiyun tgtdev->name->str);
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun btrfs_close_bdev(tgtdev);
2330*4882a593Smuzhiyun synchronize_rcu();
2331*4882a593Smuzhiyun btrfs_free_device(tgtdev);
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun
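/*
 * Read the superblock from the device at @device_path and look up the
 * matching btrfs_device in the filesystem's device list. Returns the device
 * or an ERR_PTR on failure.
 */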
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
2336*4882a593Smuzhiyun {
2337*4882a593Smuzhiyun int ret = 0;
2338*4882a593Smuzhiyun struct btrfs_super_block *disk_super;
2339*4882a593Smuzhiyun u64 devid;
2340*4882a593Smuzhiyun u8 *dev_uuid;
2341*4882a593Smuzhiyun struct block_device *bdev;
2342*4882a593Smuzhiyun struct btrfs_device *device;
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2345*4882a593Smuzhiyun fs_info->bdev_holder, 0, &bdev, &disk_super);
2346*4882a593Smuzhiyun if (ret)
2347*4882a593Smuzhiyun return ERR_PTR(ret);
2348*4882a593Smuzhiyun
2349*4882a593Smuzhiyun devid = btrfs_stack_device_id(&disk_super->dev_item);
2350*4882a593Smuzhiyun dev_uuid = disk_super->dev_item.uuid;
2351*4882a593Smuzhiyun if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2352*4882a593Smuzhiyun device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2353*4882a593Smuzhiyun disk_super->metadata_uuid, true);
2354*4882a593Smuzhiyun else
2355*4882a593Smuzhiyun device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2356*4882a593Smuzhiyun disk_super->fsid, true);
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun btrfs_release_disk_super(disk_super);
2359*4882a593Smuzhiyun if (!device)
2360*4882a593Smuzhiyun device = ERR_PTR(-ENOENT);
2361*4882a593Smuzhiyun blkdev_put(bdev, FMODE_READ);
2362*4882a593Smuzhiyun return device;
2363*4882a593Smuzhiyun }
2364*4882a593Smuzhiyun
2365*4882a593Smuzhiyun /*
2366*4882a593Smuzhiyun * Lookup a device given by device id, or the path if the id is 0.
2367*4882a593Smuzhiyun */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
2371*4882a593Smuzhiyun {
2372*4882a593Smuzhiyun struct btrfs_device *device;
2373*4882a593Smuzhiyun
2374*4882a593Smuzhiyun if (devid) {
2375*4882a593Smuzhiyun device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2376*4882a593Smuzhiyun NULL, true);
2377*4882a593Smuzhiyun if (!device)
2378*4882a593Smuzhiyun return ERR_PTR(-ENOENT);
2379*4882a593Smuzhiyun return device;
2380*4882a593Smuzhiyun }
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun if (!device_path || !device_path[0])
2383*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun if (strcmp(device_path, "missing") == 0) {
2386*4882a593Smuzhiyun /* Find first missing device */
2387*4882a593Smuzhiyun list_for_each_entry(device, &fs_info->fs_devices->devices,
2388*4882a593Smuzhiyun dev_list) {
2389*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2390*4882a593Smuzhiyun &device->dev_state) && !device->bdev)
2391*4882a593Smuzhiyun return device;
2392*4882a593Smuzhiyun }
2393*4882a593Smuzhiyun return ERR_PTR(-ENOENT);
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun
2396*4882a593Smuzhiyun return btrfs_find_device_by_path(fs_info, device_path);
2397*4882a593Smuzhiyun }
2398*4882a593Smuzhiyun
/*
 * Do all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2403*4882a593Smuzhiyun {
2404*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2405*4882a593Smuzhiyun struct btrfs_fs_devices *old_devices;
2406*4882a593Smuzhiyun struct btrfs_fs_devices *seed_devices;
2407*4882a593Smuzhiyun struct btrfs_super_block *disk_super = fs_info->super_copy;
2408*4882a593Smuzhiyun struct btrfs_device *device;
2409*4882a593Smuzhiyun u64 super_flags;
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun lockdep_assert_held(&uuid_mutex);
2412*4882a593Smuzhiyun if (!fs_devices->seeding)
2413*4882a593Smuzhiyun return -EINVAL;
2414*4882a593Smuzhiyun
2415*4882a593Smuzhiyun /*
2416*4882a593Smuzhiyun * Private copy of the seed devices, anchored at
2417*4882a593Smuzhiyun * fs_info->fs_devices->seed_list
2418*4882a593Smuzhiyun */
2419*4882a593Smuzhiyun seed_devices = alloc_fs_devices(NULL, NULL);
2420*4882a593Smuzhiyun if (IS_ERR(seed_devices))
2421*4882a593Smuzhiyun return PTR_ERR(seed_devices);
2422*4882a593Smuzhiyun
	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple seed filesystems.
	 */
2429*4882a593Smuzhiyun old_devices = clone_fs_devices(fs_devices);
2430*4882a593Smuzhiyun if (IS_ERR(old_devices)) {
2431*4882a593Smuzhiyun kfree(seed_devices);
2432*4882a593Smuzhiyun return PTR_ERR(old_devices);
2433*4882a593Smuzhiyun }
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun list_add(&old_devices->fs_list, &fs_uuids);
2436*4882a593Smuzhiyun
2437*4882a593Smuzhiyun memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2438*4882a593Smuzhiyun seed_devices->opened = 1;
2439*4882a593Smuzhiyun INIT_LIST_HEAD(&seed_devices->devices);
2440*4882a593Smuzhiyun INIT_LIST_HEAD(&seed_devices->alloc_list);
2441*4882a593Smuzhiyun mutex_init(&seed_devices->device_list_mutex);
2442*4882a593Smuzhiyun
2443*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
2444*4882a593Smuzhiyun list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2445*4882a593Smuzhiyun synchronize_rcu);
2446*4882a593Smuzhiyun list_for_each_entry(device, &seed_devices->devices, dev_list)
2447*4882a593Smuzhiyun device->fs_devices = seed_devices;
2448*4882a593Smuzhiyun
2449*4882a593Smuzhiyun fs_devices->seeding = false;
2450*4882a593Smuzhiyun fs_devices->num_devices = 0;
2451*4882a593Smuzhiyun fs_devices->open_devices = 0;
2452*4882a593Smuzhiyun fs_devices->missing_devices = 0;
2453*4882a593Smuzhiyun fs_devices->rotating = false;
2454*4882a593Smuzhiyun list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2455*4882a593Smuzhiyun
2456*4882a593Smuzhiyun generate_random_uuid(fs_devices->fsid);
2457*4882a593Smuzhiyun memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2458*4882a593Smuzhiyun memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2459*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
2460*4882a593Smuzhiyun
2461*4882a593Smuzhiyun super_flags = btrfs_super_flags(disk_super) &
2462*4882a593Smuzhiyun ~BTRFS_SUPER_FLAG_SEEDING;
2463*4882a593Smuzhiyun btrfs_set_super_flags(disk_super, super_flags);
2464*4882a593Smuzhiyun
2465*4882a593Smuzhiyun return 0;
2466*4882a593Smuzhiyun }
2467*4882a593Smuzhiyun
2468*4882a593Smuzhiyun /*
2469*4882a593Smuzhiyun * Store the expected generation for seed devices in device items.
2470*4882a593Smuzhiyun */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2472*4882a593Smuzhiyun {
2473*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2474*4882a593Smuzhiyun struct btrfs_root *root = fs_info->chunk_root;
2475*4882a593Smuzhiyun struct btrfs_path *path;
2476*4882a593Smuzhiyun struct extent_buffer *leaf;
2477*4882a593Smuzhiyun struct btrfs_dev_item *dev_item;
2478*4882a593Smuzhiyun struct btrfs_device *device;
2479*4882a593Smuzhiyun struct btrfs_key key;
2480*4882a593Smuzhiyun u8 fs_uuid[BTRFS_FSID_SIZE];
2481*4882a593Smuzhiyun u8 dev_uuid[BTRFS_UUID_SIZE];
2482*4882a593Smuzhiyun u64 devid;
2483*4882a593Smuzhiyun int ret;
2484*4882a593Smuzhiyun
2485*4882a593Smuzhiyun path = btrfs_alloc_path();
2486*4882a593Smuzhiyun if (!path)
2487*4882a593Smuzhiyun return -ENOMEM;
2488*4882a593Smuzhiyun
2489*4882a593Smuzhiyun key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2490*4882a593Smuzhiyun key.offset = 0;
2491*4882a593Smuzhiyun key.type = BTRFS_DEV_ITEM_KEY;
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun while (1) {
2494*4882a593Smuzhiyun ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2495*4882a593Smuzhiyun if (ret < 0)
2496*4882a593Smuzhiyun goto error;
2497*4882a593Smuzhiyun
2498*4882a593Smuzhiyun leaf = path->nodes[0];
2499*4882a593Smuzhiyun next_slot:
2500*4882a593Smuzhiyun if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2501*4882a593Smuzhiyun ret = btrfs_next_leaf(root, path);
2502*4882a593Smuzhiyun if (ret > 0)
2503*4882a593Smuzhiyun break;
2504*4882a593Smuzhiyun if (ret < 0)
2505*4882a593Smuzhiyun goto error;
2506*4882a593Smuzhiyun leaf = path->nodes[0];
2507*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2508*4882a593Smuzhiyun btrfs_release_path(path);
2509*4882a593Smuzhiyun continue;
2510*4882a593Smuzhiyun }
2511*4882a593Smuzhiyun
2512*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2513*4882a593Smuzhiyun if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2514*4882a593Smuzhiyun key.type != BTRFS_DEV_ITEM_KEY)
2515*4882a593Smuzhiyun break;
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun dev_item = btrfs_item_ptr(leaf, path->slots[0],
2518*4882a593Smuzhiyun struct btrfs_dev_item);
2519*4882a593Smuzhiyun devid = btrfs_device_id(leaf, dev_item);
2520*4882a593Smuzhiyun read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2521*4882a593Smuzhiyun BTRFS_UUID_SIZE);
2522*4882a593Smuzhiyun read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2523*4882a593Smuzhiyun BTRFS_FSID_SIZE);
2524*4882a593Smuzhiyun device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2525*4882a593Smuzhiyun fs_uuid, true);
2526*4882a593Smuzhiyun BUG_ON(!device); /* Logic error */
2527*4882a593Smuzhiyun
2528*4882a593Smuzhiyun if (device->fs_devices->seeding) {
2529*4882a593Smuzhiyun btrfs_set_device_generation(leaf, dev_item,
2530*4882a593Smuzhiyun device->generation);
2531*4882a593Smuzhiyun btrfs_mark_buffer_dirty(leaf);
2532*4882a593Smuzhiyun }
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun path->slots[0]++;
2535*4882a593Smuzhiyun goto next_slot;
2536*4882a593Smuzhiyun }
2537*4882a593Smuzhiyun ret = 0;
2538*4882a593Smuzhiyun error:
2539*4882a593Smuzhiyun btrfs_free_path(path);
2540*4882a593Smuzhiyun return ret;
2541*4882a593Smuzhiyun }
2542*4882a593Smuzhiyun
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2544*4882a593Smuzhiyun {
2545*4882a593Smuzhiyun struct btrfs_root *root = fs_info->dev_root;
2546*4882a593Smuzhiyun struct request_queue *q;
2547*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
2548*4882a593Smuzhiyun struct btrfs_device *device;
2549*4882a593Smuzhiyun struct block_device *bdev;
2550*4882a593Smuzhiyun struct super_block *sb = fs_info->sb;
2551*4882a593Smuzhiyun struct rcu_string *name;
2552*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2553*4882a593Smuzhiyun u64 orig_super_total_bytes;
2554*4882a593Smuzhiyun u64 orig_super_num_devices;
2555*4882a593Smuzhiyun int seeding_dev = 0;
2556*4882a593Smuzhiyun int ret = 0;
2557*4882a593Smuzhiyun bool locked = false;
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun if (sb_rdonly(sb) && !fs_devices->seeding)
2560*4882a593Smuzhiyun return -EROFS;
2561*4882a593Smuzhiyun
2562*4882a593Smuzhiyun bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2563*4882a593Smuzhiyun fs_info->bdev_holder);
2564*4882a593Smuzhiyun if (IS_ERR(bdev))
2565*4882a593Smuzhiyun return PTR_ERR(bdev);
2566*4882a593Smuzhiyun
2567*4882a593Smuzhiyun if (fs_devices->seeding) {
2568*4882a593Smuzhiyun seeding_dev = 1;
2569*4882a593Smuzhiyun down_write(&sb->s_umount);
2570*4882a593Smuzhiyun mutex_lock(&uuid_mutex);
2571*4882a593Smuzhiyun locked = true;
2572*4882a593Smuzhiyun }
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun sync_blockdev(bdev);
2575*4882a593Smuzhiyun
2576*4882a593Smuzhiyun rcu_read_lock();
2577*4882a593Smuzhiyun list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2578*4882a593Smuzhiyun if (device->bdev == bdev) {
2579*4882a593Smuzhiyun ret = -EEXIST;
2580*4882a593Smuzhiyun rcu_read_unlock();
2581*4882a593Smuzhiyun goto error;
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun rcu_read_unlock();
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun device = btrfs_alloc_device(fs_info, NULL, NULL);
2587*4882a593Smuzhiyun if (IS_ERR(device)) {
2588*4882a593Smuzhiyun /* we can safely leave the fs_devices entry around */
2589*4882a593Smuzhiyun ret = PTR_ERR(device);
2590*4882a593Smuzhiyun goto error;
2591*4882a593Smuzhiyun }
2592*4882a593Smuzhiyun
2593*4882a593Smuzhiyun name = rcu_string_strdup(device_path, GFP_KERNEL);
2594*4882a593Smuzhiyun if (!name) {
2595*4882a593Smuzhiyun ret = -ENOMEM;
2596*4882a593Smuzhiyun goto error_free_device;
2597*4882a593Smuzhiyun }
2598*4882a593Smuzhiyun rcu_assign_pointer(device->name, name);
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun trans = btrfs_start_transaction(root, 0);
2601*4882a593Smuzhiyun if (IS_ERR(trans)) {
2602*4882a593Smuzhiyun ret = PTR_ERR(trans);
2603*4882a593Smuzhiyun goto error_free_device;
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun q = bdev_get_queue(bdev);
2607*4882a593Smuzhiyun set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2608*4882a593Smuzhiyun device->generation = trans->transid;
2609*4882a593Smuzhiyun device->io_width = fs_info->sectorsize;
2610*4882a593Smuzhiyun device->io_align = fs_info->sectorsize;
2611*4882a593Smuzhiyun device->sector_size = fs_info->sectorsize;
2612*4882a593Smuzhiyun device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2613*4882a593Smuzhiyun fs_info->sectorsize);
2614*4882a593Smuzhiyun device->disk_total_bytes = device->total_bytes;
2615*4882a593Smuzhiyun device->commit_total_bytes = device->total_bytes;
2616*4882a593Smuzhiyun device->fs_info = fs_info;
2617*4882a593Smuzhiyun device->bdev = bdev;
2618*4882a593Smuzhiyun set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2619*4882a593Smuzhiyun clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2620*4882a593Smuzhiyun device->mode = FMODE_EXCL;
2621*4882a593Smuzhiyun device->dev_stats_valid = 1;
2622*4882a593Smuzhiyun set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2623*4882a593Smuzhiyun
2624*4882a593Smuzhiyun if (seeding_dev) {
2625*4882a593Smuzhiyun sb->s_flags &= ~SB_RDONLY;
2626*4882a593Smuzhiyun ret = btrfs_prepare_sprout(fs_info);
2627*4882a593Smuzhiyun if (ret) {
2628*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
2629*4882a593Smuzhiyun goto error_trans;
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun }
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun device->fs_devices = fs_devices;
2634*4882a593Smuzhiyun
2635*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
2636*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2637*4882a593Smuzhiyun list_add_rcu(&device->dev_list, &fs_devices->devices);
2638*4882a593Smuzhiyun list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2639*4882a593Smuzhiyun fs_devices->num_devices++;
2640*4882a593Smuzhiyun fs_devices->open_devices++;
2641*4882a593Smuzhiyun fs_devices->rw_devices++;
2642*4882a593Smuzhiyun fs_devices->total_devices++;
2643*4882a593Smuzhiyun fs_devices->total_rw_bytes += device->total_bytes;
2644*4882a593Smuzhiyun
2645*4882a593Smuzhiyun atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun if (!blk_queue_nonrot(q))
2648*4882a593Smuzhiyun fs_devices->rotating = true;
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2651*4882a593Smuzhiyun btrfs_set_super_total_bytes(fs_info->super_copy,
2652*4882a593Smuzhiyun round_down(orig_super_total_bytes + device->total_bytes,
2653*4882a593Smuzhiyun fs_info->sectorsize));
2654*4882a593Smuzhiyun
2655*4882a593Smuzhiyun orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2656*4882a593Smuzhiyun btrfs_set_super_num_devices(fs_info->super_copy,
2657*4882a593Smuzhiyun orig_super_num_devices + 1);
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun /*
2660*4882a593Smuzhiyun * we've got more storage, clear any full flags on the space
2661*4882a593Smuzhiyun * infos
2662*4882a593Smuzhiyun */
2663*4882a593Smuzhiyun btrfs_clear_space_info_full(fs_info);
2664*4882a593Smuzhiyun
2665*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2666*4882a593Smuzhiyun
2667*4882a593Smuzhiyun /* Add sysfs device entry */
2668*4882a593Smuzhiyun btrfs_sysfs_add_device(device);
2669*4882a593Smuzhiyun
2670*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
2671*4882a593Smuzhiyun
2672*4882a593Smuzhiyun if (seeding_dev) {
2673*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2674*4882a593Smuzhiyun ret = init_first_rw_device(trans);
2675*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2676*4882a593Smuzhiyun if (ret) {
2677*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
2678*4882a593Smuzhiyun goto error_sysfs;
2679*4882a593Smuzhiyun }
2680*4882a593Smuzhiyun }
2681*4882a593Smuzhiyun
2682*4882a593Smuzhiyun ret = btrfs_add_dev_item(trans, device);
2683*4882a593Smuzhiyun if (ret) {
2684*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
2685*4882a593Smuzhiyun goto error_sysfs;
2686*4882a593Smuzhiyun }
2687*4882a593Smuzhiyun
2688*4882a593Smuzhiyun if (seeding_dev) {
2689*4882a593Smuzhiyun ret = btrfs_finish_sprout(trans);
2690*4882a593Smuzhiyun if (ret) {
2691*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
2692*4882a593Smuzhiyun goto error_sysfs;
2693*4882a593Smuzhiyun }
2694*4882a593Smuzhiyun
2695*4882a593Smuzhiyun /*
2696*4882a593Smuzhiyun * fs_devices now represents the newly sprouted filesystem and
2697*4882a593Smuzhiyun * its fsid has been changed by btrfs_prepare_sprout
2698*4882a593Smuzhiyun */
2699*4882a593Smuzhiyun btrfs_sysfs_update_sprout_fsid(fs_devices);
2700*4882a593Smuzhiyun }
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
2703*4882a593Smuzhiyun
2704*4882a593Smuzhiyun if (seeding_dev) {
2705*4882a593Smuzhiyun mutex_unlock(&uuid_mutex);
2706*4882a593Smuzhiyun up_write(&sb->s_umount);
2707*4882a593Smuzhiyun locked = false;
2708*4882a593Smuzhiyun
2709*4882a593Smuzhiyun if (ret) /* transaction commit */
2710*4882a593Smuzhiyun return ret;
2711*4882a593Smuzhiyun
2712*4882a593Smuzhiyun ret = btrfs_relocate_sys_chunks(fs_info);
2713*4882a593Smuzhiyun if (ret < 0)
2714*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, ret,
2715*4882a593Smuzhiyun "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2716*4882a593Smuzhiyun trans = btrfs_attach_transaction(root);
2717*4882a593Smuzhiyun if (IS_ERR(trans)) {
2718*4882a593Smuzhiyun if (PTR_ERR(trans) == -ENOENT)
2719*4882a593Smuzhiyun return 0;
2720*4882a593Smuzhiyun ret = PTR_ERR(trans);
2721*4882a593Smuzhiyun trans = NULL;
2722*4882a593Smuzhiyun goto error_sysfs;
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
2725*4882a593Smuzhiyun }
2726*4882a593Smuzhiyun
	/*
	 * Now that we have written a new super block to this device, check
	 * all other fs_devices lists to see whether device_path alienates any
	 * other scanned device.
	 * We can ignore the return value as it typically returns -EINVAL and
	 * only succeeds if the device was an alien.
	 */
2734*4882a593Smuzhiyun btrfs_forget_devices(device_path);
2735*4882a593Smuzhiyun
2736*4882a593Smuzhiyun /* Update ctime/mtime for blkid or udev */
2737*4882a593Smuzhiyun update_dev_time(device_path);
2738*4882a593Smuzhiyun
2739*4882a593Smuzhiyun return ret;
2740*4882a593Smuzhiyun
2741*4882a593Smuzhiyun error_sysfs:
2742*4882a593Smuzhiyun btrfs_sysfs_remove_device(device);
2743*4882a593Smuzhiyun mutex_lock(&fs_info->fs_devices->device_list_mutex);
2744*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2745*4882a593Smuzhiyun list_del_rcu(&device->dev_list);
2746*4882a593Smuzhiyun list_del(&device->dev_alloc_list);
2747*4882a593Smuzhiyun fs_info->fs_devices->num_devices--;
2748*4882a593Smuzhiyun fs_info->fs_devices->open_devices--;
2749*4882a593Smuzhiyun fs_info->fs_devices->rw_devices--;
2750*4882a593Smuzhiyun fs_info->fs_devices->total_devices--;
2751*4882a593Smuzhiyun fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2752*4882a593Smuzhiyun atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2753*4882a593Smuzhiyun btrfs_set_super_total_bytes(fs_info->super_copy,
2754*4882a593Smuzhiyun orig_super_total_bytes);
2755*4882a593Smuzhiyun btrfs_set_super_num_devices(fs_info->super_copy,
2756*4882a593Smuzhiyun orig_super_num_devices);
2757*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2758*4882a593Smuzhiyun mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2759*4882a593Smuzhiyun error_trans:
2760*4882a593Smuzhiyun if (seeding_dev)
2761*4882a593Smuzhiyun sb->s_flags |= SB_RDONLY;
2762*4882a593Smuzhiyun if (trans)
2763*4882a593Smuzhiyun btrfs_end_transaction(trans);
2764*4882a593Smuzhiyun error_free_device:
2765*4882a593Smuzhiyun btrfs_free_device(device);
2766*4882a593Smuzhiyun error:
2767*4882a593Smuzhiyun blkdev_put(bdev, FMODE_EXCL);
2768*4882a593Smuzhiyun if (locked) {
2769*4882a593Smuzhiyun mutex_unlock(&uuid_mutex);
2770*4882a593Smuzhiyun up_write(&sb->s_umount);
2771*4882a593Smuzhiyun }
2772*4882a593Smuzhiyun return ret;
2773*4882a593Smuzhiyun }
2774*4882a593Smuzhiyun
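/*
 * Write the in-memory state of @device (type, alignment, sector size and
 * byte counters) back into its dev item in the chunk tree.
 */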
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
2777*4882a593Smuzhiyun {
2778*4882a593Smuzhiyun int ret;
2779*4882a593Smuzhiyun struct btrfs_path *path;
2780*4882a593Smuzhiyun struct btrfs_root *root = device->fs_info->chunk_root;
2781*4882a593Smuzhiyun struct btrfs_dev_item *dev_item;
2782*4882a593Smuzhiyun struct extent_buffer *leaf;
2783*4882a593Smuzhiyun struct btrfs_key key;
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun path = btrfs_alloc_path();
2786*4882a593Smuzhiyun if (!path)
2787*4882a593Smuzhiyun return -ENOMEM;
2788*4882a593Smuzhiyun
2789*4882a593Smuzhiyun key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2790*4882a593Smuzhiyun key.type = BTRFS_DEV_ITEM_KEY;
2791*4882a593Smuzhiyun key.offset = device->devid;
2792*4882a593Smuzhiyun
2793*4882a593Smuzhiyun ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2794*4882a593Smuzhiyun if (ret < 0)
2795*4882a593Smuzhiyun goto out;
2796*4882a593Smuzhiyun
2797*4882a593Smuzhiyun if (ret > 0) {
2798*4882a593Smuzhiyun ret = -ENOENT;
2799*4882a593Smuzhiyun goto out;
2800*4882a593Smuzhiyun }
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun leaf = path->nodes[0];
2803*4882a593Smuzhiyun dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2804*4882a593Smuzhiyun
2805*4882a593Smuzhiyun btrfs_set_device_id(leaf, dev_item, device->devid);
2806*4882a593Smuzhiyun btrfs_set_device_type(leaf, dev_item, device->type);
2807*4882a593Smuzhiyun btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2808*4882a593Smuzhiyun btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2809*4882a593Smuzhiyun btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2810*4882a593Smuzhiyun btrfs_set_device_total_bytes(leaf, dev_item,
2811*4882a593Smuzhiyun btrfs_device_get_disk_total_bytes(device));
2812*4882a593Smuzhiyun btrfs_set_device_bytes_used(leaf, dev_item,
2813*4882a593Smuzhiyun btrfs_device_get_bytes_used(device));
2814*4882a593Smuzhiyun btrfs_mark_buffer_dirty(leaf);
2815*4882a593Smuzhiyun
2816*4882a593Smuzhiyun out:
2817*4882a593Smuzhiyun btrfs_free_path(path);
2818*4882a593Smuzhiyun return ret;
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun
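/*
 * Grow a writeable device to @new_size (rounded down to the sector size),
 * update the superblock totals and persist the new size in the device item.
 */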
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = device->fs_info;
2825*4882a593Smuzhiyun struct btrfs_super_block *super_copy = fs_info->super_copy;
2826*4882a593Smuzhiyun u64 old_total;
2827*4882a593Smuzhiyun u64 diff;
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2830*4882a593Smuzhiyun return -EACCES;
2831*4882a593Smuzhiyun
2832*4882a593Smuzhiyun new_size = round_down(new_size, fs_info->sectorsize);
2833*4882a593Smuzhiyun
2834*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2835*4882a593Smuzhiyun old_total = btrfs_super_total_bytes(super_copy);
2836*4882a593Smuzhiyun diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun if (new_size <= device->total_bytes ||
2839*4882a593Smuzhiyun test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2840*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2841*4882a593Smuzhiyun return -EINVAL;
2842*4882a593Smuzhiyun }
2843*4882a593Smuzhiyun
2844*4882a593Smuzhiyun btrfs_set_super_total_bytes(super_copy,
2845*4882a593Smuzhiyun round_down(old_total + diff, fs_info->sectorsize));
2846*4882a593Smuzhiyun device->fs_devices->total_rw_bytes += diff;
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun btrfs_device_set_total_bytes(device, new_size);
2849*4882a593Smuzhiyun btrfs_device_set_disk_total_bytes(device, new_size);
2850*4882a593Smuzhiyun btrfs_clear_space_info_full(device->fs_info);
2851*4882a593Smuzhiyun if (list_empty(&device->post_commit_list))
2852*4882a593Smuzhiyun list_add_tail(&device->post_commit_list,
2853*4882a593Smuzhiyun &trans->transaction->dev_update_list);
2854*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2855*4882a593Smuzhiyun
2856*4882a593Smuzhiyun return btrfs_update_device(trans, device);
2857*4882a593Smuzhiyun }
2858*4882a593Smuzhiyun
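/* Delete the chunk item for @chunk_offset from the chunk tree. */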
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2860*4882a593Smuzhiyun {
2861*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2862*4882a593Smuzhiyun struct btrfs_root *root = fs_info->chunk_root;
2863*4882a593Smuzhiyun int ret;
2864*4882a593Smuzhiyun struct btrfs_path *path;
2865*4882a593Smuzhiyun struct btrfs_key key;
2866*4882a593Smuzhiyun
2867*4882a593Smuzhiyun path = btrfs_alloc_path();
2868*4882a593Smuzhiyun if (!path)
2869*4882a593Smuzhiyun return -ENOMEM;
2870*4882a593Smuzhiyun
2871*4882a593Smuzhiyun key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2872*4882a593Smuzhiyun key.offset = chunk_offset;
2873*4882a593Smuzhiyun key.type = BTRFS_CHUNK_ITEM_KEY;
2874*4882a593Smuzhiyun
2875*4882a593Smuzhiyun ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2876*4882a593Smuzhiyun if (ret < 0)
2877*4882a593Smuzhiyun goto out;
2878*4882a593Smuzhiyun else if (ret > 0) { /* Logic error or corruption */
2879*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, -ENOENT,
2880*4882a593Smuzhiyun "Failed lookup while freeing chunk.");
2881*4882a593Smuzhiyun ret = -ENOENT;
2882*4882a593Smuzhiyun goto out;
2883*4882a593Smuzhiyun }
2884*4882a593Smuzhiyun
2885*4882a593Smuzhiyun ret = btrfs_del_item(trans, root, path);
2886*4882a593Smuzhiyun if (ret < 0)
2887*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, ret,
2888*4882a593Smuzhiyun "Failed to delete chunk item.");
2889*4882a593Smuzhiyun out:
2890*4882a593Smuzhiyun btrfs_free_path(path);
2891*4882a593Smuzhiyun return ret;
2892*4882a593Smuzhiyun }
2893*4882a593Smuzhiyun
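/*
 * Remove the entry for @chunk_offset from the in-memory copy of the
 * superblock's sys_chunk_array.
 */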
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2895*4882a593Smuzhiyun {
2896*4882a593Smuzhiyun struct btrfs_super_block *super_copy = fs_info->super_copy;
2897*4882a593Smuzhiyun struct btrfs_disk_key *disk_key;
2898*4882a593Smuzhiyun struct btrfs_chunk *chunk;
2899*4882a593Smuzhiyun u8 *ptr;
2900*4882a593Smuzhiyun int ret = 0;
2901*4882a593Smuzhiyun u32 num_stripes;
2902*4882a593Smuzhiyun u32 array_size;
2903*4882a593Smuzhiyun u32 len = 0;
2904*4882a593Smuzhiyun u32 cur;
2905*4882a593Smuzhiyun struct btrfs_key key;
2906*4882a593Smuzhiyun
2907*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2908*4882a593Smuzhiyun array_size = btrfs_super_sys_array_size(super_copy);
2909*4882a593Smuzhiyun
2910*4882a593Smuzhiyun ptr = super_copy->sys_chunk_array;
2911*4882a593Smuzhiyun cur = 0;
2912*4882a593Smuzhiyun
2913*4882a593Smuzhiyun while (cur < array_size) {
2914*4882a593Smuzhiyun disk_key = (struct btrfs_disk_key *)ptr;
2915*4882a593Smuzhiyun btrfs_disk_key_to_cpu(&key, disk_key);
2916*4882a593Smuzhiyun
2917*4882a593Smuzhiyun len = sizeof(*disk_key);
2918*4882a593Smuzhiyun
2919*4882a593Smuzhiyun if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2920*4882a593Smuzhiyun chunk = (struct btrfs_chunk *)(ptr + len);
2921*4882a593Smuzhiyun num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2922*4882a593Smuzhiyun len += btrfs_chunk_item_size(num_stripes);
2923*4882a593Smuzhiyun } else {
2924*4882a593Smuzhiyun ret = -EIO;
2925*4882a593Smuzhiyun break;
2926*4882a593Smuzhiyun }
2927*4882a593Smuzhiyun if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2928*4882a593Smuzhiyun key.offset == chunk_offset) {
2929*4882a593Smuzhiyun memmove(ptr, ptr + len, array_size - (cur + len));
2930*4882a593Smuzhiyun array_size -= len;
2931*4882a593Smuzhiyun btrfs_set_super_sys_array_size(super_copy, array_size);
2932*4882a593Smuzhiyun } else {
2933*4882a593Smuzhiyun ptr += len;
2934*4882a593Smuzhiyun cur += len;
2935*4882a593Smuzhiyun }
2936*4882a593Smuzhiyun }
2937*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
2938*4882a593Smuzhiyun return ret;
2939*4882a593Smuzhiyun }
2940*4882a593Smuzhiyun
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @fs_info:  The filesystem.
 * @logical:  Logical block offset in bytes.
 * @length:   Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
2950*4882a593Smuzhiyun {
2951*4882a593Smuzhiyun struct extent_map_tree *em_tree;
2952*4882a593Smuzhiyun struct extent_map *em;
2953*4882a593Smuzhiyun
2954*4882a593Smuzhiyun em_tree = &fs_info->mapping_tree;
2955*4882a593Smuzhiyun read_lock(&em_tree->lock);
2956*4882a593Smuzhiyun em = lookup_extent_mapping(em_tree, logical, length);
2957*4882a593Smuzhiyun read_unlock(&em_tree->lock);
2958*4882a593Smuzhiyun
2959*4882a593Smuzhiyun if (!em) {
2960*4882a593Smuzhiyun btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2961*4882a593Smuzhiyun logical, length);
2962*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
2963*4882a593Smuzhiyun }
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun if (em->start > logical || em->start + em->len < logical) {
2966*4882a593Smuzhiyun btrfs_crit(fs_info,
2967*4882a593Smuzhiyun "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2968*4882a593Smuzhiyun logical, length, em->start, em->start + em->len);
2969*4882a593Smuzhiyun free_extent_map(em);
2970*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
2971*4882a593Smuzhiyun }
2972*4882a593Smuzhiyun
2973*4882a593Smuzhiyun /* callers are responsible for dropping em's ref. */
2974*4882a593Smuzhiyun return em;
2975*4882a593Smuzhiyun }
2976*4882a593Smuzhiyun
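/*
 * Remove a chunk: free the device extents of all its stripes, delete its
 * chunk item (and its sys_chunk_array entry for SYSTEM chunks) and remove
 * the corresponding block group.
 */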
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2978*4882a593Smuzhiyun {
2979*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2980*4882a593Smuzhiyun struct extent_map *em;
2981*4882a593Smuzhiyun struct map_lookup *map;
2982*4882a593Smuzhiyun u64 dev_extent_len = 0;
2983*4882a593Smuzhiyun int i, ret = 0;
2984*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2987*4882a593Smuzhiyun if (IS_ERR(em)) {
2988*4882a593Smuzhiyun /*
2989*4882a593Smuzhiyun * This is a logic error, but we don't want to just rely on the
2990*4882a593Smuzhiyun * user having built with ASSERT enabled, so if ASSERT doesn't
2991*4882a593Smuzhiyun * do anything we still error out.
2992*4882a593Smuzhiyun */
2993*4882a593Smuzhiyun ASSERT(0);
2994*4882a593Smuzhiyun return PTR_ERR(em);
2995*4882a593Smuzhiyun }
2996*4882a593Smuzhiyun map = em->map_lookup;
2997*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
2998*4882a593Smuzhiyun check_system_chunk(trans, map->type);
2999*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
3000*4882a593Smuzhiyun
3001*4882a593Smuzhiyun /*
3002*4882a593Smuzhiyun * Take the device list mutex to prevent races with the final phase of
3003*4882a593Smuzhiyun * a device replace operation that replaces the device object associated
3004*4882a593Smuzhiyun * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3005*4882a593Smuzhiyun */
3006*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
3007*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
3008*4882a593Smuzhiyun struct btrfs_device *device = map->stripes[i].dev;
3009*4882a593Smuzhiyun ret = btrfs_free_dev_extent(trans, device,
3010*4882a593Smuzhiyun map->stripes[i].physical,
3011*4882a593Smuzhiyun &dev_extent_len);
3012*4882a593Smuzhiyun if (ret) {
3013*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
3014*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
3015*4882a593Smuzhiyun goto out;
3016*4882a593Smuzhiyun }
3017*4882a593Smuzhiyun
3018*4882a593Smuzhiyun if (device->bytes_used > 0) {
3019*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
3020*4882a593Smuzhiyun btrfs_device_set_bytes_used(device,
3021*4882a593Smuzhiyun device->bytes_used - dev_extent_len);
3022*4882a593Smuzhiyun atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3023*4882a593Smuzhiyun btrfs_clear_space_info_full(fs_info);
3024*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
3025*4882a593Smuzhiyun }
3026*4882a593Smuzhiyun
3027*4882a593Smuzhiyun ret = btrfs_update_device(trans, device);
3028*4882a593Smuzhiyun if (ret) {
3029*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
3030*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
3031*4882a593Smuzhiyun goto out;
3032*4882a593Smuzhiyun }
3033*4882a593Smuzhiyun }
3034*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
3035*4882a593Smuzhiyun
3036*4882a593Smuzhiyun ret = btrfs_free_chunk(trans, chunk_offset);
3037*4882a593Smuzhiyun if (ret) {
3038*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
3039*4882a593Smuzhiyun goto out;
3040*4882a593Smuzhiyun }
3041*4882a593Smuzhiyun
3042*4882a593Smuzhiyun trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3045*4882a593Smuzhiyun ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3046*4882a593Smuzhiyun if (ret) {
3047*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
3048*4882a593Smuzhiyun goto out;
3049*4882a593Smuzhiyun }
3050*4882a593Smuzhiyun }
3051*4882a593Smuzhiyun
3052*4882a593Smuzhiyun ret = btrfs_remove_block_group(trans, chunk_offset, em);
3053*4882a593Smuzhiyun if (ret) {
3054*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
3055*4882a593Smuzhiyun goto out;
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun
3058*4882a593Smuzhiyun out:
3059*4882a593Smuzhiyun /* once for us */
3060*4882a593Smuzhiyun free_extent_map(em);
3061*4882a593Smuzhiyun return ret;
3062*4882a593Smuzhiyun }
3063*4882a593Smuzhiyun
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3065*4882a593Smuzhiyun {
3066*4882a593Smuzhiyun struct btrfs_root *root = fs_info->chunk_root;
3067*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
3068*4882a593Smuzhiyun struct btrfs_block_group *block_group;
3069*4882a593Smuzhiyun int ret;
3070*4882a593Smuzhiyun
3071*4882a593Smuzhiyun /*
3072*4882a593Smuzhiyun * Prevent races with automatic removal of unused block groups.
3073*4882a593Smuzhiyun * After we relocate and before we remove the chunk with offset
3074*4882a593Smuzhiyun * chunk_offset, automatic removal of the block group can kick in,
3075*4882a593Smuzhiyun * resulting in a failure when calling btrfs_remove_chunk() below.
3076*4882a593Smuzhiyun *
3077*4882a593Smuzhiyun * Make sure to acquire this mutex before doing a tree search (dev
3078*4882a593Smuzhiyun * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3079*4882a593Smuzhiyun * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3080*4882a593Smuzhiyun * we release the path used to search the chunk/dev tree and before
3081*4882a593Smuzhiyun * the current task acquires this mutex and calls us.
3082*4882a593Smuzhiyun */
3083*4882a593Smuzhiyun lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3084*4882a593Smuzhiyun
3085*4882a593Smuzhiyun /* step one, relocate all the extents inside this chunk */
3086*4882a593Smuzhiyun btrfs_scrub_pause(fs_info);
3087*4882a593Smuzhiyun ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3088*4882a593Smuzhiyun btrfs_scrub_continue(fs_info);
3089*4882a593Smuzhiyun if (ret)
3090*4882a593Smuzhiyun return ret;
3091*4882a593Smuzhiyun
3092*4882a593Smuzhiyun block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3093*4882a593Smuzhiyun if (!block_group)
3094*4882a593Smuzhiyun return -ENOENT;
3095*4882a593Smuzhiyun btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3096*4882a593Smuzhiyun btrfs_put_block_group(block_group);
3097*4882a593Smuzhiyun
3098*4882a593Smuzhiyun trans = btrfs_start_trans_remove_block_group(root->fs_info,
3099*4882a593Smuzhiyun chunk_offset);
3100*4882a593Smuzhiyun if (IS_ERR(trans)) {
3101*4882a593Smuzhiyun ret = PTR_ERR(trans);
3102*4882a593Smuzhiyun btrfs_handle_fs_error(root->fs_info, ret, NULL);
3103*4882a593Smuzhiyun return ret;
3104*4882a593Smuzhiyun }
3105*4882a593Smuzhiyun
3106*4882a593Smuzhiyun /*
3107*4882a593Smuzhiyun * step two, delete the device extents and the
3108*4882a593Smuzhiyun * chunk tree entries
3109*4882a593Smuzhiyun */
3110*4882a593Smuzhiyun ret = btrfs_remove_chunk(trans, chunk_offset);
3111*4882a593Smuzhiyun btrfs_end_transaction(trans);
3112*4882a593Smuzhiyun return ret;
3113*4882a593Smuzhiyun }
3114*4882a593Smuzhiyun
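/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk. Chunks that
 * fail with -ENOSPC are retried once after the remaining ones have been
 * relocated.
 */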
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3116*4882a593Smuzhiyun {
3117*4882a593Smuzhiyun struct btrfs_root *chunk_root = fs_info->chunk_root;
3118*4882a593Smuzhiyun struct btrfs_path *path;
3119*4882a593Smuzhiyun struct extent_buffer *leaf;
3120*4882a593Smuzhiyun struct btrfs_chunk *chunk;
3121*4882a593Smuzhiyun struct btrfs_key key;
3122*4882a593Smuzhiyun struct btrfs_key found_key;
3123*4882a593Smuzhiyun u64 chunk_type;
3124*4882a593Smuzhiyun bool retried = false;
3125*4882a593Smuzhiyun int failed = 0;
3126*4882a593Smuzhiyun int ret;
3127*4882a593Smuzhiyun
3128*4882a593Smuzhiyun path = btrfs_alloc_path();
3129*4882a593Smuzhiyun if (!path)
3130*4882a593Smuzhiyun return -ENOMEM;
3131*4882a593Smuzhiyun
3132*4882a593Smuzhiyun again:
3133*4882a593Smuzhiyun key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3134*4882a593Smuzhiyun key.offset = (u64)-1;
3135*4882a593Smuzhiyun key.type = BTRFS_CHUNK_ITEM_KEY;
3136*4882a593Smuzhiyun
3137*4882a593Smuzhiyun while (1) {
3138*4882a593Smuzhiyun mutex_lock(&fs_info->delete_unused_bgs_mutex);
3139*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3140*4882a593Smuzhiyun if (ret < 0) {
3141*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3142*4882a593Smuzhiyun goto error;
3143*4882a593Smuzhiyun }
3144*4882a593Smuzhiyun BUG_ON(ret == 0); /* Corruption */
3145*4882a593Smuzhiyun
3146*4882a593Smuzhiyun ret = btrfs_previous_item(chunk_root, path, key.objectid,
3147*4882a593Smuzhiyun key.type);
3148*4882a593Smuzhiyun if (ret)
3149*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3150*4882a593Smuzhiyun if (ret < 0)
3151*4882a593Smuzhiyun goto error;
3152*4882a593Smuzhiyun if (ret > 0)
3153*4882a593Smuzhiyun break;
3154*4882a593Smuzhiyun
3155*4882a593Smuzhiyun leaf = path->nodes[0];
3156*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3157*4882a593Smuzhiyun
3158*4882a593Smuzhiyun chunk = btrfs_item_ptr(leaf, path->slots[0],
3159*4882a593Smuzhiyun struct btrfs_chunk);
3160*4882a593Smuzhiyun chunk_type = btrfs_chunk_type(leaf, chunk);
3161*4882a593Smuzhiyun btrfs_release_path(path);
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3164*4882a593Smuzhiyun ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3165*4882a593Smuzhiyun if (ret == -ENOSPC)
3166*4882a593Smuzhiyun failed++;
3167*4882a593Smuzhiyun else
3168*4882a593Smuzhiyun BUG_ON(ret);
3169*4882a593Smuzhiyun }
3170*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3171*4882a593Smuzhiyun
3172*4882a593Smuzhiyun if (found_key.offset == 0)
3173*4882a593Smuzhiyun break;
3174*4882a593Smuzhiyun key.offset = found_key.offset - 1;
3175*4882a593Smuzhiyun }
3176*4882a593Smuzhiyun ret = 0;
3177*4882a593Smuzhiyun if (failed && !retried) {
3178*4882a593Smuzhiyun failed = 0;
3179*4882a593Smuzhiyun retried = true;
3180*4882a593Smuzhiyun goto again;
3181*4882a593Smuzhiyun } else if (WARN_ON(failed && retried)) {
3182*4882a593Smuzhiyun ret = -ENOSPC;
3183*4882a593Smuzhiyun }
3184*4882a593Smuzhiyun error:
3185*4882a593Smuzhiyun btrfs_free_path(path);
3186*4882a593Smuzhiyun return ret;
3187*4882a593Smuzhiyun }
3188*4882a593Smuzhiyun
/*
 * Return 1 if a data chunk was allocated successfully,
 * <0 on errors while allocating a data chunk,
 * 0 if no data chunk needs to be allocated.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
3196*4882a593Smuzhiyun {
3197*4882a593Smuzhiyun struct btrfs_block_group *cache;
3198*4882a593Smuzhiyun u64 bytes_used;
3199*4882a593Smuzhiyun u64 chunk_type;
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3202*4882a593Smuzhiyun ASSERT(cache);
3203*4882a593Smuzhiyun chunk_type = cache->flags;
3204*4882a593Smuzhiyun btrfs_put_block_group(cache);
3205*4882a593Smuzhiyun
3206*4882a593Smuzhiyun if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3207*4882a593Smuzhiyun return 0;
3208*4882a593Smuzhiyun
3209*4882a593Smuzhiyun spin_lock(&fs_info->data_sinfo->lock);
3210*4882a593Smuzhiyun bytes_used = fs_info->data_sinfo->bytes_used;
3211*4882a593Smuzhiyun spin_unlock(&fs_info->data_sinfo->lock);
3212*4882a593Smuzhiyun
3213*4882a593Smuzhiyun if (!bytes_used) {
3214*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
3215*4882a593Smuzhiyun int ret;
3216*4882a593Smuzhiyun
3217*4882a593Smuzhiyun trans = btrfs_join_transaction(fs_info->tree_root);
3218*4882a593Smuzhiyun if (IS_ERR(trans))
3219*4882a593Smuzhiyun return PTR_ERR(trans);
3220*4882a593Smuzhiyun
3221*4882a593Smuzhiyun ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3222*4882a593Smuzhiyun btrfs_end_transaction(trans);
3223*4882a593Smuzhiyun if (ret < 0)
3224*4882a593Smuzhiyun return ret;
3225*4882a593Smuzhiyun return 1;
3226*4882a593Smuzhiyun }
3227*4882a593Smuzhiyun
3228*4882a593Smuzhiyun return 0;
3229*4882a593Smuzhiyun }
3230*4882a593Smuzhiyun
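/*
 * Persist the balance args as a balance item in the tree root (committed in
 * its own transaction) so that an interrupted balance can later be resumed.
 */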
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
3233*4882a593Smuzhiyun {
3234*4882a593Smuzhiyun struct btrfs_root *root = fs_info->tree_root;
3235*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
3236*4882a593Smuzhiyun struct btrfs_balance_item *item;
3237*4882a593Smuzhiyun struct btrfs_disk_balance_args disk_bargs;
3238*4882a593Smuzhiyun struct btrfs_path *path;
3239*4882a593Smuzhiyun struct extent_buffer *leaf;
3240*4882a593Smuzhiyun struct btrfs_key key;
3241*4882a593Smuzhiyun int ret, err;
3242*4882a593Smuzhiyun
3243*4882a593Smuzhiyun path = btrfs_alloc_path();
3244*4882a593Smuzhiyun if (!path)
3245*4882a593Smuzhiyun return -ENOMEM;
3246*4882a593Smuzhiyun
3247*4882a593Smuzhiyun trans = btrfs_start_transaction(root, 0);
3248*4882a593Smuzhiyun if (IS_ERR(trans)) {
3249*4882a593Smuzhiyun btrfs_free_path(path);
3250*4882a593Smuzhiyun return PTR_ERR(trans);
3251*4882a593Smuzhiyun }
3252*4882a593Smuzhiyun
3253*4882a593Smuzhiyun key.objectid = BTRFS_BALANCE_OBJECTID;
3254*4882a593Smuzhiyun key.type = BTRFS_TEMPORARY_ITEM_KEY;
3255*4882a593Smuzhiyun key.offset = 0;
3256*4882a593Smuzhiyun
3257*4882a593Smuzhiyun ret = btrfs_insert_empty_item(trans, root, path, &key,
3258*4882a593Smuzhiyun sizeof(*item));
3259*4882a593Smuzhiyun if (ret)
3260*4882a593Smuzhiyun goto out;
3261*4882a593Smuzhiyun
3262*4882a593Smuzhiyun leaf = path->nodes[0];
3263*4882a593Smuzhiyun item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3264*4882a593Smuzhiyun
3265*4882a593Smuzhiyun memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3266*4882a593Smuzhiyun
3267*4882a593Smuzhiyun btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3268*4882a593Smuzhiyun btrfs_set_balance_data(leaf, item, &disk_bargs);
3269*4882a593Smuzhiyun btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3270*4882a593Smuzhiyun btrfs_set_balance_meta(leaf, item, &disk_bargs);
3271*4882a593Smuzhiyun btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3272*4882a593Smuzhiyun btrfs_set_balance_sys(leaf, item, &disk_bargs);
3273*4882a593Smuzhiyun
3274*4882a593Smuzhiyun btrfs_set_balance_flags(leaf, item, bctl->flags);
3275*4882a593Smuzhiyun
3276*4882a593Smuzhiyun btrfs_mark_buffer_dirty(leaf);
3277*4882a593Smuzhiyun out:
3278*4882a593Smuzhiyun btrfs_free_path(path);
3279*4882a593Smuzhiyun err = btrfs_commit_transaction(trans);
3280*4882a593Smuzhiyun if (err && !ret)
3281*4882a593Smuzhiyun ret = err;
3282*4882a593Smuzhiyun return ret;
3283*4882a593Smuzhiyun }
3284*4882a593Smuzhiyun
static int del_balance_item(struct btrfs_fs_info *fs_info)
3286*4882a593Smuzhiyun {
3287*4882a593Smuzhiyun struct btrfs_root *root = fs_info->tree_root;
3288*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
3289*4882a593Smuzhiyun struct btrfs_path *path;
3290*4882a593Smuzhiyun struct btrfs_key key;
3291*4882a593Smuzhiyun int ret, err;
3292*4882a593Smuzhiyun
3293*4882a593Smuzhiyun path = btrfs_alloc_path();
3294*4882a593Smuzhiyun if (!path)
3295*4882a593Smuzhiyun return -ENOMEM;
3296*4882a593Smuzhiyun
3297*4882a593Smuzhiyun trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3298*4882a593Smuzhiyun if (IS_ERR(trans)) {
3299*4882a593Smuzhiyun btrfs_free_path(path);
3300*4882a593Smuzhiyun return PTR_ERR(trans);
3301*4882a593Smuzhiyun }
3302*4882a593Smuzhiyun
3303*4882a593Smuzhiyun key.objectid = BTRFS_BALANCE_OBJECTID;
3304*4882a593Smuzhiyun key.type = BTRFS_TEMPORARY_ITEM_KEY;
3305*4882a593Smuzhiyun key.offset = 0;
3306*4882a593Smuzhiyun
3307*4882a593Smuzhiyun ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3308*4882a593Smuzhiyun if (ret < 0)
3309*4882a593Smuzhiyun goto out;
3310*4882a593Smuzhiyun if (ret > 0) {
3311*4882a593Smuzhiyun ret = -ENOENT;
3312*4882a593Smuzhiyun goto out;
3313*4882a593Smuzhiyun }
3314*4882a593Smuzhiyun
3315*4882a593Smuzhiyun ret = btrfs_del_item(trans, root, path);
3316*4882a593Smuzhiyun out:
3317*4882a593Smuzhiyun btrfs_free_path(path);
3318*4882a593Smuzhiyun err = btrfs_commit_transaction(trans);
3319*4882a593Smuzhiyun if (err && !ret)
3320*4882a593Smuzhiyun ret = err;
3321*4882a593Smuzhiyun return ret;
3322*4882a593Smuzhiyun }
3323*4882a593Smuzhiyun
3324*4882a593Smuzhiyun /*
3325*4882a593Smuzhiyun * This is a heuristic used to reduce the number of chunks balanced on
3326*4882a593Smuzhiyun * resume after balance was interrupted.
3327*4882a593Smuzhiyun */
static void update_balance_args(struct btrfs_balance_control *bctl)
3329*4882a593Smuzhiyun {
3330*4882a593Smuzhiyun /*
3331*4882a593Smuzhiyun * Turn on soft mode for chunk types that were being converted.
3332*4882a593Smuzhiyun */
3333*4882a593Smuzhiyun if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3334*4882a593Smuzhiyun bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3335*4882a593Smuzhiyun if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3336*4882a593Smuzhiyun bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3337*4882a593Smuzhiyun if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3338*4882a593Smuzhiyun bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3339*4882a593Smuzhiyun
	/*
	 * Turn on the usage filter if it is not already in use. The idea is
	 * that chunks that we have already balanced should be reasonably
	 * full. Don't do it for chunks that are being converted - that would
	 * keep us from relocating unconverted (albeit full) chunks.
	 */
3347*4882a593Smuzhiyun if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3348*4882a593Smuzhiyun !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3349*4882a593Smuzhiyun !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3350*4882a593Smuzhiyun bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3351*4882a593Smuzhiyun bctl->data.usage = 90;
3352*4882a593Smuzhiyun }
3353*4882a593Smuzhiyun if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3354*4882a593Smuzhiyun !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3355*4882a593Smuzhiyun !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3356*4882a593Smuzhiyun bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3357*4882a593Smuzhiyun bctl->sys.usage = 90;
3358*4882a593Smuzhiyun }
3359*4882a593Smuzhiyun if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3360*4882a593Smuzhiyun !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3361*4882a593Smuzhiyun !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3362*4882a593Smuzhiyun bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3363*4882a593Smuzhiyun bctl->meta.usage = 90;
3364*4882a593Smuzhiyun }
3365*4882a593Smuzhiyun }
3366*4882a593Smuzhiyun
3367*4882a593Smuzhiyun /*
3368*4882a593Smuzhiyun * Clear the balance status in fs_info and delete the balance item from disk.
3369*4882a593Smuzhiyun */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
3371*4882a593Smuzhiyun {
3372*4882a593Smuzhiyun struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3373*4882a593Smuzhiyun int ret;
3374*4882a593Smuzhiyun
3375*4882a593Smuzhiyun BUG_ON(!fs_info->balance_ctl);
3376*4882a593Smuzhiyun
3377*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
3378*4882a593Smuzhiyun fs_info->balance_ctl = NULL;
3379*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
3380*4882a593Smuzhiyun
3381*4882a593Smuzhiyun kfree(bctl);
3382*4882a593Smuzhiyun ret = del_balance_item(fs_info);
3383*4882a593Smuzhiyun if (ret)
3384*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, ret, NULL);
3385*4882a593Smuzhiyun }
3386*4882a593Smuzhiyun
3387*4882a593Smuzhiyun /*
3388*4882a593Smuzhiyun * Balance filters. Return 1 if chunk should be filtered out
3389*4882a593Smuzhiyun * (should not be balanced).
3390*4882a593Smuzhiyun */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
3393*4882a593Smuzhiyun {
3394*4882a593Smuzhiyun chunk_type = chunk_to_extended(chunk_type) &
3395*4882a593Smuzhiyun BTRFS_EXTENDED_PROFILE_MASK;
3396*4882a593Smuzhiyun
3397*4882a593Smuzhiyun if (bargs->profiles & chunk_type)
3398*4882a593Smuzhiyun return 0;
3399*4882a593Smuzhiyun
3400*4882a593Smuzhiyun return 1;
3401*4882a593Smuzhiyun }
3402*4882a593Smuzhiyun
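/*
 * Usage filter with an explicit [usage_min, usage_max) range: filter out the
 * chunk unless its used bytes fall within the thresholds derived from the
 * block group length.
 */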
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
3405*4882a593Smuzhiyun {
3406*4882a593Smuzhiyun struct btrfs_block_group *cache;
3407*4882a593Smuzhiyun u64 chunk_used;
3408*4882a593Smuzhiyun u64 user_thresh_min;
3409*4882a593Smuzhiyun u64 user_thresh_max;
3410*4882a593Smuzhiyun int ret = 1;
3411*4882a593Smuzhiyun
3412*4882a593Smuzhiyun cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3413*4882a593Smuzhiyun chunk_used = cache->used;
3414*4882a593Smuzhiyun
3415*4882a593Smuzhiyun if (bargs->usage_min == 0)
3416*4882a593Smuzhiyun user_thresh_min = 0;
3417*4882a593Smuzhiyun else
3418*4882a593Smuzhiyun user_thresh_min = div_factor_fine(cache->length,
3419*4882a593Smuzhiyun bargs->usage_min);
3420*4882a593Smuzhiyun
3421*4882a593Smuzhiyun if (bargs->usage_max == 0)
3422*4882a593Smuzhiyun user_thresh_max = 1;
3423*4882a593Smuzhiyun else if (bargs->usage_max > 100)
3424*4882a593Smuzhiyun user_thresh_max = cache->length;
3425*4882a593Smuzhiyun else
3426*4882a593Smuzhiyun user_thresh_max = div_factor_fine(cache->length,
3427*4882a593Smuzhiyun bargs->usage_max);
3428*4882a593Smuzhiyun
3429*4882a593Smuzhiyun if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3430*4882a593Smuzhiyun ret = 0;
3431*4882a593Smuzhiyun
3432*4882a593Smuzhiyun btrfs_put_block_group(cache);
3433*4882a593Smuzhiyun return ret;
3434*4882a593Smuzhiyun }
3435*4882a593Smuzhiyun
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
3438*4882a593Smuzhiyun {
3439*4882a593Smuzhiyun struct btrfs_block_group *cache;
3440*4882a593Smuzhiyun u64 chunk_used, user_thresh;
3441*4882a593Smuzhiyun int ret = 1;
3442*4882a593Smuzhiyun
3443*4882a593Smuzhiyun cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3444*4882a593Smuzhiyun chunk_used = cache->used;
3445*4882a593Smuzhiyun
3446*4882a593Smuzhiyun if (bargs->usage_min == 0)
3447*4882a593Smuzhiyun user_thresh = 1;
3448*4882a593Smuzhiyun else if (bargs->usage > 100)
3449*4882a593Smuzhiyun user_thresh = cache->length;
3450*4882a593Smuzhiyun else
3451*4882a593Smuzhiyun user_thresh = div_factor_fine(cache->length, bargs->usage);
3452*4882a593Smuzhiyun
3453*4882a593Smuzhiyun if (chunk_used < user_thresh)
3454*4882a593Smuzhiyun ret = 0;
3455*4882a593Smuzhiyun
3456*4882a593Smuzhiyun btrfs_put_block_group(cache);
3457*4882a593Smuzhiyun return ret;
3458*4882a593Smuzhiyun }
3459*4882a593Smuzhiyun
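/* Devid filter: keep the chunk only if one of its stripes is on bargs->devid. */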
3460*4882a593Smuzhiyun static int chunk_devid_filter(struct extent_buffer *leaf,
3461*4882a593Smuzhiyun struct btrfs_chunk *chunk,
3462*4882a593Smuzhiyun struct btrfs_balance_args *bargs)
3463*4882a593Smuzhiyun {
3464*4882a593Smuzhiyun struct btrfs_stripe *stripe;
3465*4882a593Smuzhiyun int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3466*4882a593Smuzhiyun int i;
3467*4882a593Smuzhiyun
3468*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
3469*4882a593Smuzhiyun stripe = btrfs_stripe_nr(chunk, i);
3470*4882a593Smuzhiyun if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3471*4882a593Smuzhiyun return 0;
3472*4882a593Smuzhiyun }
3473*4882a593Smuzhiyun
3474*4882a593Smuzhiyun return 1;
3475*4882a593Smuzhiyun }
3476*4882a593Smuzhiyun
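/*
 * Number of data stripes in a chunk: parity profiles subtract the parity
 * stripes, mirrored/striped profiles divide by the number of copies.
 */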
3477*4882a593Smuzhiyun static u64 calc_data_stripes(u64 type, int num_stripes)
3478*4882a593Smuzhiyun {
3479*4882a593Smuzhiyun const int index = btrfs_bg_flags_to_raid_index(type);
3480*4882a593Smuzhiyun const int ncopies = btrfs_raid_array[index].ncopies;
3481*4882a593Smuzhiyun const int nparity = btrfs_raid_array[index].nparity;
3482*4882a593Smuzhiyun
3483*4882a593Smuzhiyun if (nparity)
3484*4882a593Smuzhiyun return num_stripes - nparity;
3485*4882a593Smuzhiyun else
3486*4882a593Smuzhiyun return num_stripes / ncopies;
3487*4882a593Smuzhiyun }
3488*4882a593Smuzhiyun
3489*4882a593Smuzhiyun /* [pstart, pend) */
3490*4882a593Smuzhiyun static int chunk_drange_filter(struct extent_buffer *leaf,
3491*4882a593Smuzhiyun struct btrfs_chunk *chunk,
3492*4882a593Smuzhiyun struct btrfs_balance_args *bargs)
3493*4882a593Smuzhiyun {
3494*4882a593Smuzhiyun struct btrfs_stripe *stripe;
3495*4882a593Smuzhiyun int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3496*4882a593Smuzhiyun u64 stripe_offset;
3497*4882a593Smuzhiyun u64 stripe_length;
3498*4882a593Smuzhiyun u64 type;
3499*4882a593Smuzhiyun int factor;
3500*4882a593Smuzhiyun int i;
3501*4882a593Smuzhiyun
3502*4882a593Smuzhiyun if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3503*4882a593Smuzhiyun return 0;
3504*4882a593Smuzhiyun
3505*4882a593Smuzhiyun type = btrfs_chunk_type(leaf, chunk);
3506*4882a593Smuzhiyun factor = calc_data_stripes(type, num_stripes);
3507*4882a593Smuzhiyun
3508*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
3509*4882a593Smuzhiyun stripe = btrfs_stripe_nr(chunk, i);
3510*4882a593Smuzhiyun if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3511*4882a593Smuzhiyun continue;
3512*4882a593Smuzhiyun
3513*4882a593Smuzhiyun stripe_offset = btrfs_stripe_offset(leaf, stripe);
3514*4882a593Smuzhiyun stripe_length = btrfs_chunk_length(leaf, chunk);
3515*4882a593Smuzhiyun stripe_length = div_u64(stripe_length, factor);
3516*4882a593Smuzhiyun
3517*4882a593Smuzhiyun if (stripe_offset < bargs->pend &&
3518*4882a593Smuzhiyun stripe_offset + stripe_length > bargs->pstart)
3519*4882a593Smuzhiyun return 0;
3520*4882a593Smuzhiyun }
3521*4882a593Smuzhiyun
3522*4882a593Smuzhiyun return 1;
3523*4882a593Smuzhiyun }
3524*4882a593Smuzhiyun
3525*4882a593Smuzhiyun /* [vstart, vend) */
3526*4882a593Smuzhiyun static int chunk_vrange_filter(struct extent_buffer *leaf,
3527*4882a593Smuzhiyun struct btrfs_chunk *chunk,
3528*4882a593Smuzhiyun u64 chunk_offset,
3529*4882a593Smuzhiyun struct btrfs_balance_args *bargs)
3530*4882a593Smuzhiyun {
3531*4882a593Smuzhiyun if (chunk_offset < bargs->vend &&
3532*4882a593Smuzhiyun chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3533*4882a593Smuzhiyun /* at least part of the chunk is inside this vrange */
3534*4882a593Smuzhiyun return 0;
3535*4882a593Smuzhiyun
3536*4882a593Smuzhiyun return 1;
3537*4882a593Smuzhiyun }
3538*4882a593Smuzhiyun
3539*4882a593Smuzhiyun static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3540*4882a593Smuzhiyun struct btrfs_chunk *chunk,
3541*4882a593Smuzhiyun struct btrfs_balance_args *bargs)
3542*4882a593Smuzhiyun {
3543*4882a593Smuzhiyun int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3544*4882a593Smuzhiyun
3545*4882a593Smuzhiyun if (bargs->stripes_min <= num_stripes
3546*4882a593Smuzhiyun && num_stripes <= bargs->stripes_max)
3547*4882a593Smuzhiyun return 0;
3548*4882a593Smuzhiyun
3549*4882a593Smuzhiyun return 1;
3550*4882a593Smuzhiyun }
3551*4882a593Smuzhiyun
3552*4882a593Smuzhiyun static int chunk_soft_convert_filter(u64 chunk_type,
3553*4882a593Smuzhiyun struct btrfs_balance_args *bargs)
3554*4882a593Smuzhiyun {
3555*4882a593Smuzhiyun if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3556*4882a593Smuzhiyun return 0;
3557*4882a593Smuzhiyun
3558*4882a593Smuzhiyun chunk_type = chunk_to_extended(chunk_type) &
3559*4882a593Smuzhiyun BTRFS_EXTENDED_PROFILE_MASK;
3560*4882a593Smuzhiyun
3561*4882a593Smuzhiyun if (bargs->target == chunk_type)
3562*4882a593Smuzhiyun return 1;
3563*4882a593Smuzhiyun
3564*4882a593Smuzhiyun return 0;
3565*4882a593Smuzhiyun }
3566*4882a593Smuzhiyun
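/*
 * Run a chunk through all configured balance filters. Returns 1 if the
 * chunk should be relocated, 0 if it should be skipped.
 */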
3567*4882a593Smuzhiyun static int should_balance_chunk(struct extent_buffer *leaf,
3568*4882a593Smuzhiyun struct btrfs_chunk *chunk, u64 chunk_offset)
3569*4882a593Smuzhiyun {
3570*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = leaf->fs_info;
3571*4882a593Smuzhiyun struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3572*4882a593Smuzhiyun struct btrfs_balance_args *bargs = NULL;
3573*4882a593Smuzhiyun u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3574*4882a593Smuzhiyun
3575*4882a593Smuzhiyun /* type filter */
3576*4882a593Smuzhiyun if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3577*4882a593Smuzhiyun (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3578*4882a593Smuzhiyun return 0;
3579*4882a593Smuzhiyun }
3580*4882a593Smuzhiyun
3581*4882a593Smuzhiyun if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3582*4882a593Smuzhiyun bargs = &bctl->data;
3583*4882a593Smuzhiyun else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3584*4882a593Smuzhiyun bargs = &bctl->sys;
3585*4882a593Smuzhiyun else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3586*4882a593Smuzhiyun bargs = &bctl->meta;
3587*4882a593Smuzhiyun
3588*4882a593Smuzhiyun /* profiles filter */
3589*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3590*4882a593Smuzhiyun chunk_profiles_filter(chunk_type, bargs)) {
3591*4882a593Smuzhiyun return 0;
3592*4882a593Smuzhiyun }
3593*4882a593Smuzhiyun
3594*4882a593Smuzhiyun /* usage filter */
3595*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3596*4882a593Smuzhiyun chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3597*4882a593Smuzhiyun return 0;
3598*4882a593Smuzhiyun } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3599*4882a593Smuzhiyun chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3600*4882a593Smuzhiyun return 0;
3601*4882a593Smuzhiyun }
3602*4882a593Smuzhiyun
3603*4882a593Smuzhiyun /* devid filter */
3604*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3605*4882a593Smuzhiyun chunk_devid_filter(leaf, chunk, bargs)) {
3606*4882a593Smuzhiyun return 0;
3607*4882a593Smuzhiyun }
3608*4882a593Smuzhiyun
3609*4882a593Smuzhiyun /* drange filter, makes sense only with devid filter */
3610*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3611*4882a593Smuzhiyun chunk_drange_filter(leaf, chunk, bargs)) {
3612*4882a593Smuzhiyun return 0;
3613*4882a593Smuzhiyun }
3614*4882a593Smuzhiyun
3615*4882a593Smuzhiyun /* vrange filter */
3616*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3617*4882a593Smuzhiyun chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3618*4882a593Smuzhiyun return 0;
3619*4882a593Smuzhiyun }
3620*4882a593Smuzhiyun
3621*4882a593Smuzhiyun /* stripes filter */
3622*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3623*4882a593Smuzhiyun chunk_stripes_range_filter(leaf, chunk, bargs)) {
3624*4882a593Smuzhiyun return 0;
3625*4882a593Smuzhiyun }
3626*4882a593Smuzhiyun
3627*4882a593Smuzhiyun /* soft profile changing mode */
3628*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3629*4882a593Smuzhiyun chunk_soft_convert_filter(chunk_type, bargs)) {
3630*4882a593Smuzhiyun return 0;
3631*4882a593Smuzhiyun }
3632*4882a593Smuzhiyun
3633*4882a593Smuzhiyun /*
3634*4882a593Smuzhiyun * limited by count, must be the last filter
3635*4882a593Smuzhiyun */
3636*4882a593Smuzhiyun if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3637*4882a593Smuzhiyun if (bargs->limit == 0)
3638*4882a593Smuzhiyun return 0;
3639*4882a593Smuzhiyun else
3640*4882a593Smuzhiyun bargs->limit--;
3641*4882a593Smuzhiyun } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3642*4882a593Smuzhiyun /*
3643*4882a593Smuzhiyun * Same logic as the 'limit' filter; the minimum cannot be
3644*4882a593Smuzhiyun * determined here because we do not have the global information
3645*4882a593Smuzhiyun * about the count of all chunks that satisfy the filters.
3646*4882a593Smuzhiyun */
3647*4882a593Smuzhiyun if (bargs->limit_max == 0)
3648*4882a593Smuzhiyun return 0;
3649*4882a593Smuzhiyun else
3650*4882a593Smuzhiyun bargs->limit_max--;
3651*4882a593Smuzhiyun }
3652*4882a593Smuzhiyun
3653*4882a593Smuzhiyun return 1;
3654*4882a593Smuzhiyun }
3655*4882a593Smuzhiyun
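/*
 * Main balance loop. The chunk tree is walked backwards twice: the first
 * pass only counts the chunks matching the filters (needed for the
 * limit_min accounting), the second pass relocates them.
 */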
3656*4882a593Smuzhiyun static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3657*4882a593Smuzhiyun {
3658*4882a593Smuzhiyun struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3659*4882a593Smuzhiyun struct btrfs_root *chunk_root = fs_info->chunk_root;
3660*4882a593Smuzhiyun u64 chunk_type;
3661*4882a593Smuzhiyun struct btrfs_chunk *chunk;
3662*4882a593Smuzhiyun struct btrfs_path *path = NULL;
3663*4882a593Smuzhiyun struct btrfs_key key;
3664*4882a593Smuzhiyun struct btrfs_key found_key;
3665*4882a593Smuzhiyun struct extent_buffer *leaf;
3666*4882a593Smuzhiyun int slot;
3667*4882a593Smuzhiyun int ret;
3668*4882a593Smuzhiyun int enospc_errors = 0;
3669*4882a593Smuzhiyun bool counting = true;
3670*4882a593Smuzhiyun /* The single value limit and min/max limits share the same bytes in the args, save them */
3671*4882a593Smuzhiyun u64 limit_data = bctl->data.limit;
3672*4882a593Smuzhiyun u64 limit_meta = bctl->meta.limit;
3673*4882a593Smuzhiyun u64 limit_sys = bctl->sys.limit;
3674*4882a593Smuzhiyun u32 count_data = 0;
3675*4882a593Smuzhiyun u32 count_meta = 0;
3676*4882a593Smuzhiyun u32 count_sys = 0;
3677*4882a593Smuzhiyun int chunk_reserved = 0;
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun path = btrfs_alloc_path();
3680*4882a593Smuzhiyun if (!path) {
3681*4882a593Smuzhiyun ret = -ENOMEM;
3682*4882a593Smuzhiyun goto error;
3683*4882a593Smuzhiyun }
3684*4882a593Smuzhiyun
3685*4882a593Smuzhiyun /* zero out stat counters */
3686*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
3687*4882a593Smuzhiyun memset(&bctl->stat, 0, sizeof(bctl->stat));
3688*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
3689*4882a593Smuzhiyun again:
3690*4882a593Smuzhiyun if (!counting) {
3691*4882a593Smuzhiyun /*
3692*4882a593Smuzhiyun * The single value limit and min/max limits share the same bytes in
3693*4882a593Smuzhiyun * the balance args, so restore the single values saved before counting.
3694*4882a593Smuzhiyun */
3695*4882a593Smuzhiyun bctl->data.limit = limit_data;
3696*4882a593Smuzhiyun bctl->meta.limit = limit_meta;
3697*4882a593Smuzhiyun bctl->sys.limit = limit_sys;
3698*4882a593Smuzhiyun }
3699*4882a593Smuzhiyun key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3700*4882a593Smuzhiyun key.offset = (u64)-1;
3701*4882a593Smuzhiyun key.type = BTRFS_CHUNK_ITEM_KEY;
3702*4882a593Smuzhiyun
3703*4882a593Smuzhiyun while (1) {
3704*4882a593Smuzhiyun if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3705*4882a593Smuzhiyun atomic_read(&fs_info->balance_cancel_req)) {
3706*4882a593Smuzhiyun ret = -ECANCELED;
3707*4882a593Smuzhiyun goto error;
3708*4882a593Smuzhiyun }
3709*4882a593Smuzhiyun
3710*4882a593Smuzhiyun mutex_lock(&fs_info->delete_unused_bgs_mutex);
3711*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3712*4882a593Smuzhiyun if (ret < 0) {
3713*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3714*4882a593Smuzhiyun goto error;
3715*4882a593Smuzhiyun }
3716*4882a593Smuzhiyun
3717*4882a593Smuzhiyun /*
3718*4882a593Smuzhiyun * this shouldn't happen, it means the last relocate
3719*4882a593Smuzhiyun * failed
3720*4882a593Smuzhiyun */
3721*4882a593Smuzhiyun if (ret == 0)
3722*4882a593Smuzhiyun BUG(); /* FIXME break ? */
3723*4882a593Smuzhiyun
3724*4882a593Smuzhiyun ret = btrfs_previous_item(chunk_root, path, 0,
3725*4882a593Smuzhiyun BTRFS_CHUNK_ITEM_KEY);
3726*4882a593Smuzhiyun if (ret) {
3727*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3728*4882a593Smuzhiyun ret = 0;
3729*4882a593Smuzhiyun break;
3730*4882a593Smuzhiyun }
3731*4882a593Smuzhiyun
3732*4882a593Smuzhiyun leaf = path->nodes[0];
3733*4882a593Smuzhiyun slot = path->slots[0];
3734*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &found_key, slot);
3735*4882a593Smuzhiyun
3736*4882a593Smuzhiyun if (found_key.objectid != key.objectid) {
3737*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3738*4882a593Smuzhiyun break;
3739*4882a593Smuzhiyun }
3740*4882a593Smuzhiyun
3741*4882a593Smuzhiyun chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3742*4882a593Smuzhiyun chunk_type = btrfs_chunk_type(leaf, chunk);
3743*4882a593Smuzhiyun
3744*4882a593Smuzhiyun if (!counting) {
3745*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
3746*4882a593Smuzhiyun bctl->stat.considered++;
3747*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
3748*4882a593Smuzhiyun }
3749*4882a593Smuzhiyun
3750*4882a593Smuzhiyun ret = should_balance_chunk(leaf, chunk, found_key.offset);
3751*4882a593Smuzhiyun
3752*4882a593Smuzhiyun btrfs_release_path(path);
3753*4882a593Smuzhiyun if (!ret) {
3754*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3755*4882a593Smuzhiyun goto loop;
3756*4882a593Smuzhiyun }
3757*4882a593Smuzhiyun
3758*4882a593Smuzhiyun if (counting) {
3759*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3760*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
3761*4882a593Smuzhiyun bctl->stat.expected++;
3762*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
3763*4882a593Smuzhiyun
3764*4882a593Smuzhiyun if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3765*4882a593Smuzhiyun count_data++;
3766*4882a593Smuzhiyun else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3767*4882a593Smuzhiyun count_sys++;
3768*4882a593Smuzhiyun else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3769*4882a593Smuzhiyun count_meta++;
3770*4882a593Smuzhiyun
3771*4882a593Smuzhiyun goto loop;
3772*4882a593Smuzhiyun }
3773*4882a593Smuzhiyun
3774*4882a593Smuzhiyun /*
3775*4882a593Smuzhiyun * Apply limit_min filter, no need to check if the LIMITS
3776*4882a593Smuzhiyun * filter is used, limit_min is 0 by default
3777*4882a593Smuzhiyun */
3778*4882a593Smuzhiyun if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3779*4882a593Smuzhiyun count_data < bctl->data.limit_min)
3780*4882a593Smuzhiyun || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3781*4882a593Smuzhiyun count_meta < bctl->meta.limit_min)
3782*4882a593Smuzhiyun || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3783*4882a593Smuzhiyun count_sys < bctl->sys.limit_min)) {
3784*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3785*4882a593Smuzhiyun goto loop;
3786*4882a593Smuzhiyun }
3787*4882a593Smuzhiyun
3788*4882a593Smuzhiyun if (!chunk_reserved) {
3789*4882a593Smuzhiyun /*
3790*4882a593Smuzhiyun * We may be relocating the only data chunk we have,
3791*4882a593Smuzhiyun * which could potentially end up losing the data's
3792*4882a593Smuzhiyun * raid profile, so let's allocate an empty one in
3793*4882a593Smuzhiyun * advance.
3794*4882a593Smuzhiyun */
3795*4882a593Smuzhiyun ret = btrfs_may_alloc_data_chunk(fs_info,
3796*4882a593Smuzhiyun found_key.offset);
3797*4882a593Smuzhiyun if (ret < 0) {
3798*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3799*4882a593Smuzhiyun goto error;
3800*4882a593Smuzhiyun } else if (ret == 1) {
3801*4882a593Smuzhiyun chunk_reserved = 1;
3802*4882a593Smuzhiyun }
3803*4882a593Smuzhiyun }
3804*4882a593Smuzhiyun
3805*4882a593Smuzhiyun ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3806*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3807*4882a593Smuzhiyun if (ret == -ENOSPC) {
3808*4882a593Smuzhiyun enospc_errors++;
3809*4882a593Smuzhiyun } else if (ret == -ETXTBSY) {
3810*4882a593Smuzhiyun btrfs_info(fs_info,
3811*4882a593Smuzhiyun "skipping relocation of block group %llu due to active swapfile",
3812*4882a593Smuzhiyun found_key.offset);
3813*4882a593Smuzhiyun ret = 0;
3814*4882a593Smuzhiyun } else if (ret) {
3815*4882a593Smuzhiyun goto error;
3816*4882a593Smuzhiyun } else {
3817*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
3818*4882a593Smuzhiyun bctl->stat.completed++;
3819*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
3820*4882a593Smuzhiyun }
3821*4882a593Smuzhiyun loop:
3822*4882a593Smuzhiyun if (found_key.offset == 0)
3823*4882a593Smuzhiyun break;
3824*4882a593Smuzhiyun key.offset = found_key.offset - 1;
3825*4882a593Smuzhiyun }
3826*4882a593Smuzhiyun
3827*4882a593Smuzhiyun if (counting) {
3828*4882a593Smuzhiyun btrfs_release_path(path);
3829*4882a593Smuzhiyun counting = false;
3830*4882a593Smuzhiyun goto again;
3831*4882a593Smuzhiyun }
3832*4882a593Smuzhiyun error:
3833*4882a593Smuzhiyun btrfs_free_path(path);
3834*4882a593Smuzhiyun if (enospc_errors) {
3835*4882a593Smuzhiyun btrfs_info(fs_info, "%d enospc errors during balance",
3836*4882a593Smuzhiyun enospc_errors);
3837*4882a593Smuzhiyun if (!ret)
3838*4882a593Smuzhiyun ret = -ENOSPC;
3839*4882a593Smuzhiyun }
3840*4882a593Smuzhiyun
3841*4882a593Smuzhiyun return ret;
3842*4882a593Smuzhiyun }
3843*4882a593Smuzhiyun
3844*4882a593Smuzhiyun /**
3845*4882a593Smuzhiyun * alloc_profile_is_valid - see if a given profile is valid and reduced
3846*4882a593Smuzhiyun * @flags: profile to validate
3847*4882a593Smuzhiyun * @extended: if true @flags is treated as an extended profile
3848*4882a593Smuzhiyun */
3849*4882a593Smuzhiyun static int alloc_profile_is_valid(u64 flags, int extended)
3850*4882a593Smuzhiyun {
3851*4882a593Smuzhiyun u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3852*4882a593Smuzhiyun BTRFS_BLOCK_GROUP_PROFILE_MASK);
3853*4882a593Smuzhiyun
3854*4882a593Smuzhiyun flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3855*4882a593Smuzhiyun
3856*4882a593Smuzhiyun /* 1) check that all other bits are zeroed */
3857*4882a593Smuzhiyun if (flags & ~mask)
3858*4882a593Smuzhiyun return 0;
3859*4882a593Smuzhiyun
3860*4882a593Smuzhiyun /* 2) see if profile is reduced */
3861*4882a593Smuzhiyun if (flags == 0)
3862*4882a593Smuzhiyun return !extended; /* "0" is valid for usual profiles */
3863*4882a593Smuzhiyun
3864*4882a593Smuzhiyun return has_single_bit_set(flags);
3865*4882a593Smuzhiyun }
3866*4882a593Smuzhiyun
3867*4882a593Smuzhiyun static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3868*4882a593Smuzhiyun {
3869*4882a593Smuzhiyun /* cancel requested || normal exit path */
3870*4882a593Smuzhiyun return atomic_read(&fs_info->balance_cancel_req) ||
3871*4882a593Smuzhiyun (atomic_read(&fs_info->balance_pause_req) == 0 &&
3872*4882a593Smuzhiyun atomic_read(&fs_info->balance_cancel_req) == 0);
3873*4882a593Smuzhiyun }
3874*4882a593Smuzhiyun
3875*4882a593Smuzhiyun /*
3876*4882a593Smuzhiyun * Validate target profile against allowed profiles and return true if it's OK.
3877*4882a593Smuzhiyun * Otherwise print the error message and return false.
3878*4882a593Smuzhiyun */
3879*4882a593Smuzhiyun static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3880*4882a593Smuzhiyun const struct btrfs_balance_args *bargs,
3881*4882a593Smuzhiyun u64 allowed, const char *type)
3882*4882a593Smuzhiyun {
3883*4882a593Smuzhiyun if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3884*4882a593Smuzhiyun return true;
3885*4882a593Smuzhiyun
3886*4882a593Smuzhiyun /* Profile is valid and does not have bits outside of the allowed set */
3887*4882a593Smuzhiyun if (alloc_profile_is_valid(bargs->target, 1) &&
3888*4882a593Smuzhiyun (bargs->target & ~allowed) == 0)
3889*4882a593Smuzhiyun return true;
3890*4882a593Smuzhiyun
3891*4882a593Smuzhiyun btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3892*4882a593Smuzhiyun type, btrfs_bg_type_to_raid_name(bargs->target));
3893*4882a593Smuzhiyun return false;
3894*4882a593Smuzhiyun }
3895*4882a593Smuzhiyun
3896*4882a593Smuzhiyun /*
3897*4882a593Smuzhiyun * Fill @buf with textual description of balance filter flags @bargs, up to
3898*4882a593Smuzhiyun * @size_buf including the terminating null. The output may be trimmed if it
3899*4882a593Smuzhiyun * does not fit into the provided buffer.
3900*4882a593Smuzhiyun */
3901*4882a593Smuzhiyun static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3902*4882a593Smuzhiyun u32 size_buf)
3903*4882a593Smuzhiyun {
3904*4882a593Smuzhiyun int ret;
3905*4882a593Smuzhiyun u32 size_bp = size_buf;
3906*4882a593Smuzhiyun char *bp = buf;
3907*4882a593Smuzhiyun u64 flags = bargs->flags;
3908*4882a593Smuzhiyun char tmp_buf[128] = {'\0'};
3909*4882a593Smuzhiyun
3910*4882a593Smuzhiyun if (!flags)
3911*4882a593Smuzhiyun return;
3912*4882a593Smuzhiyun
3913*4882a593Smuzhiyun #define CHECK_APPEND_NOARG(a) \
3914*4882a593Smuzhiyun do { \
3915*4882a593Smuzhiyun ret = snprintf(bp, size_bp, (a)); \
3916*4882a593Smuzhiyun if (ret < 0 || ret >= size_bp) \
3917*4882a593Smuzhiyun goto out_overflow; \
3918*4882a593Smuzhiyun size_bp -= ret; \
3919*4882a593Smuzhiyun bp += ret; \
3920*4882a593Smuzhiyun } while (0)
3921*4882a593Smuzhiyun
3922*4882a593Smuzhiyun #define CHECK_APPEND_1ARG(a, v1) \
3923*4882a593Smuzhiyun do { \
3924*4882a593Smuzhiyun ret = snprintf(bp, size_bp, (a), (v1)); \
3925*4882a593Smuzhiyun if (ret < 0 || ret >= size_bp) \
3926*4882a593Smuzhiyun goto out_overflow; \
3927*4882a593Smuzhiyun size_bp -= ret; \
3928*4882a593Smuzhiyun bp += ret; \
3929*4882a593Smuzhiyun } while (0)
3930*4882a593Smuzhiyun
3931*4882a593Smuzhiyun #define CHECK_APPEND_2ARG(a, v1, v2) \
3932*4882a593Smuzhiyun do { \
3933*4882a593Smuzhiyun ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
3934*4882a593Smuzhiyun if (ret < 0 || ret >= size_bp) \
3935*4882a593Smuzhiyun goto out_overflow; \
3936*4882a593Smuzhiyun size_bp -= ret; \
3937*4882a593Smuzhiyun bp += ret; \
3938*4882a593Smuzhiyun } while (0)
3939*4882a593Smuzhiyun
3940*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3941*4882a593Smuzhiyun CHECK_APPEND_1ARG("convert=%s,",
3942*4882a593Smuzhiyun btrfs_bg_type_to_raid_name(bargs->target));
3943*4882a593Smuzhiyun
3944*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_SOFT)
3945*4882a593Smuzhiyun CHECK_APPEND_NOARG("soft,");
3946*4882a593Smuzhiyun
3947*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3948*4882a593Smuzhiyun btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3949*4882a593Smuzhiyun sizeof(tmp_buf));
3950*4882a593Smuzhiyun CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3951*4882a593Smuzhiyun }
3952*4882a593Smuzhiyun
3953*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_USAGE)
3954*4882a593Smuzhiyun CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3955*4882a593Smuzhiyun
3956*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3957*4882a593Smuzhiyun CHECK_APPEND_2ARG("usage=%u..%u,",
3958*4882a593Smuzhiyun bargs->usage_min, bargs->usage_max);
3959*4882a593Smuzhiyun
3960*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_DEVID)
3961*4882a593Smuzhiyun CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3962*4882a593Smuzhiyun
3963*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3964*4882a593Smuzhiyun CHECK_APPEND_2ARG("drange=%llu..%llu,",
3965*4882a593Smuzhiyun bargs->pstart, bargs->pend);
3966*4882a593Smuzhiyun
3967*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3968*4882a593Smuzhiyun CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3969*4882a593Smuzhiyun bargs->vstart, bargs->vend);
3970*4882a593Smuzhiyun
3971*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_LIMIT)
3972*4882a593Smuzhiyun CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
3973*4882a593Smuzhiyun
3974*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
3975*4882a593Smuzhiyun CHECK_APPEND_2ARG("limit=%u..%u,",
3976*4882a593Smuzhiyun bargs->limit_min, bargs->limit_max);
3977*4882a593Smuzhiyun
3978*4882a593Smuzhiyun if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
3979*4882a593Smuzhiyun CHECK_APPEND_2ARG("stripes=%u..%u,",
3980*4882a593Smuzhiyun bargs->stripes_min, bargs->stripes_max);
3981*4882a593Smuzhiyun
3982*4882a593Smuzhiyun #undef CHECK_APPEND_2ARG
3983*4882a593Smuzhiyun #undef CHECK_APPEND_1ARG
3984*4882a593Smuzhiyun #undef CHECK_APPEND_NOARG
3985*4882a593Smuzhiyun
3986*4882a593Smuzhiyun out_overflow:
3987*4882a593Smuzhiyun
3988*4882a593Smuzhiyun if (size_bp < size_buf)
3989*4882a593Smuzhiyun buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
3990*4882a593Smuzhiyun else
3991*4882a593Smuzhiyun buf[0] = '\0';
3992*4882a593Smuzhiyun }
3993*4882a593Smuzhiyun
3994*4882a593Smuzhiyun static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
3995*4882a593Smuzhiyun {
3996*4882a593Smuzhiyun u32 size_buf = 1024;
3997*4882a593Smuzhiyun char tmp_buf[192] = {'\0'};
3998*4882a593Smuzhiyun char *buf;
3999*4882a593Smuzhiyun char *bp;
4000*4882a593Smuzhiyun u32 size_bp = size_buf;
4001*4882a593Smuzhiyun int ret;
4002*4882a593Smuzhiyun struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4003*4882a593Smuzhiyun
4004*4882a593Smuzhiyun buf = kzalloc(size_buf, GFP_KERNEL);
4005*4882a593Smuzhiyun if (!buf)
4006*4882a593Smuzhiyun return;
4007*4882a593Smuzhiyun
4008*4882a593Smuzhiyun bp = buf;
4009*4882a593Smuzhiyun
4010*4882a593Smuzhiyun #define CHECK_APPEND_1ARG(a, v1) \
4011*4882a593Smuzhiyun do { \
4012*4882a593Smuzhiyun ret = snprintf(bp, size_bp, (a), (v1)); \
4013*4882a593Smuzhiyun if (ret < 0 || ret >= size_bp) \
4014*4882a593Smuzhiyun goto out_overflow; \
4015*4882a593Smuzhiyun size_bp -= ret; \
4016*4882a593Smuzhiyun bp += ret; \
4017*4882a593Smuzhiyun } while (0)
4018*4882a593Smuzhiyun
4019*4882a593Smuzhiyun if (bctl->flags & BTRFS_BALANCE_FORCE)
4020*4882a593Smuzhiyun CHECK_APPEND_1ARG("%s", "-f ");
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun if (bctl->flags & BTRFS_BALANCE_DATA) {
4023*4882a593Smuzhiyun describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4024*4882a593Smuzhiyun CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4025*4882a593Smuzhiyun }
4026*4882a593Smuzhiyun
4027*4882a593Smuzhiyun if (bctl->flags & BTRFS_BALANCE_METADATA) {
4028*4882a593Smuzhiyun describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4029*4882a593Smuzhiyun CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4030*4882a593Smuzhiyun }
4031*4882a593Smuzhiyun
4032*4882a593Smuzhiyun if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4033*4882a593Smuzhiyun describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4034*4882a593Smuzhiyun CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4035*4882a593Smuzhiyun }
4036*4882a593Smuzhiyun
4037*4882a593Smuzhiyun #undef CHECK_APPEND_1ARG
4038*4882a593Smuzhiyun
4039*4882a593Smuzhiyun out_overflow:
4040*4882a593Smuzhiyun
4041*4882a593Smuzhiyun if (size_bp < size_buf)
4042*4882a593Smuzhiyun buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4043*4882a593Smuzhiyun btrfs_info(fs_info, "balance: %s %s",
4044*4882a593Smuzhiyun (bctl->flags & BTRFS_BALANCE_RESUME) ?
4045*4882a593Smuzhiyun "resume" : "start", buf);
4046*4882a593Smuzhiyun
4047*4882a593Smuzhiyun kfree(buf);
4048*4882a593Smuzhiyun }
4049*4882a593Smuzhiyun
4050*4882a593Smuzhiyun /*
4051*4882a593Smuzhiyun * Should be called with the balance mutex held
4052*4882a593Smuzhiyun */
4053*4882a593Smuzhiyun int btrfs_balance(struct btrfs_fs_info *fs_info,
4054*4882a593Smuzhiyun struct btrfs_balance_control *bctl,
4055*4882a593Smuzhiyun struct btrfs_ioctl_balance_args *bargs)
4056*4882a593Smuzhiyun {
4057*4882a593Smuzhiyun u64 meta_target, data_target;
4058*4882a593Smuzhiyun u64 allowed;
4059*4882a593Smuzhiyun int mixed = 0;
4060*4882a593Smuzhiyun int ret;
4061*4882a593Smuzhiyun u64 num_devices;
4062*4882a593Smuzhiyun unsigned seq;
4063*4882a593Smuzhiyun bool reducing_redundancy;
4064*4882a593Smuzhiyun int i;
4065*4882a593Smuzhiyun
4066*4882a593Smuzhiyun if (btrfs_fs_closing(fs_info) ||
4067*4882a593Smuzhiyun atomic_read(&fs_info->balance_pause_req) ||
4068*4882a593Smuzhiyun btrfs_should_cancel_balance(fs_info)) {
4069*4882a593Smuzhiyun ret = -EINVAL;
4070*4882a593Smuzhiyun goto out;
4071*4882a593Smuzhiyun }
4072*4882a593Smuzhiyun
4073*4882a593Smuzhiyun allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4074*4882a593Smuzhiyun if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4075*4882a593Smuzhiyun mixed = 1;
4076*4882a593Smuzhiyun
4077*4882a593Smuzhiyun /*
4078*4882a593Smuzhiyun * In case of mixed groups both data and meta should be picked,
4079*4882a593Smuzhiyun * and identical options should be given for both of them.
4080*4882a593Smuzhiyun */
4081*4882a593Smuzhiyun allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4082*4882a593Smuzhiyun if (mixed && (bctl->flags & allowed)) {
4083*4882a593Smuzhiyun if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4084*4882a593Smuzhiyun !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4085*4882a593Smuzhiyun memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4086*4882a593Smuzhiyun btrfs_err(fs_info,
4087*4882a593Smuzhiyun "balance: mixed groups data and metadata options must be the same");
4088*4882a593Smuzhiyun ret = -EINVAL;
4089*4882a593Smuzhiyun goto out;
4090*4882a593Smuzhiyun }
4091*4882a593Smuzhiyun }
4092*4882a593Smuzhiyun
4093*4882a593Smuzhiyun /*
4094*4882a593Smuzhiyun * rw_devices will not change at the moment, device add/delete/replace
4095*4882a593Smuzhiyun * are exclusive
4096*4882a593Smuzhiyun */
4097*4882a593Smuzhiyun num_devices = fs_info->fs_devices->rw_devices;
4098*4882a593Smuzhiyun
4099*4882a593Smuzhiyun /*
4100*4882a593Smuzhiyun * SINGLE profile on-disk has no profile bit, but in-memory we have a
4101*4882a593Smuzhiyun * special bit for it, to make it easier to distinguish. Thus we need
4102*4882a593Smuzhiyun * to set it manually, or balance would refuse the profile.
4103*4882a593Smuzhiyun */
4104*4882a593Smuzhiyun allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4105*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4106*4882a593Smuzhiyun if (num_devices >= btrfs_raid_array[i].devs_min)
4107*4882a593Smuzhiyun allowed |= btrfs_raid_array[i].bg_flag;
4108*4882a593Smuzhiyun
4109*4882a593Smuzhiyun if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4110*4882a593Smuzhiyun !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4111*4882a593Smuzhiyun !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4112*4882a593Smuzhiyun ret = -EINVAL;
4113*4882a593Smuzhiyun goto out;
4114*4882a593Smuzhiyun }
4115*4882a593Smuzhiyun
4116*4882a593Smuzhiyun /*
4117*4882a593Smuzhiyun * Allow to reduce metadata or system integrity only if force set for
4118*4882a593Smuzhiyun * profiles with redundancy (copies, parity)
4119*4882a593Smuzhiyun */
4120*4882a593Smuzhiyun allowed = 0;
4121*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4122*4882a593Smuzhiyun if (btrfs_raid_array[i].ncopies >= 2 ||
4123*4882a593Smuzhiyun btrfs_raid_array[i].tolerated_failures >= 1)
4124*4882a593Smuzhiyun allowed |= btrfs_raid_array[i].bg_flag;
4125*4882a593Smuzhiyun }
4126*4882a593Smuzhiyun do {
4127*4882a593Smuzhiyun seq = read_seqbegin(&fs_info->profiles_lock);
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4130*4882a593Smuzhiyun (fs_info->avail_system_alloc_bits & allowed) &&
4131*4882a593Smuzhiyun !(bctl->sys.target & allowed)) ||
4132*4882a593Smuzhiyun ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4133*4882a593Smuzhiyun (fs_info->avail_metadata_alloc_bits & allowed) &&
4134*4882a593Smuzhiyun !(bctl->meta.target & allowed)))
4135*4882a593Smuzhiyun reducing_redundancy = true;
4136*4882a593Smuzhiyun else
4137*4882a593Smuzhiyun reducing_redundancy = false;
4138*4882a593Smuzhiyun
4139*4882a593Smuzhiyun /* if we're not converting, the target field is uninitialized */
4140*4882a593Smuzhiyun meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4141*4882a593Smuzhiyun bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4142*4882a593Smuzhiyun data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4143*4882a593Smuzhiyun bctl->data.target : fs_info->avail_data_alloc_bits;
4144*4882a593Smuzhiyun } while (read_seqretry(&fs_info->profiles_lock, seq));
4145*4882a593Smuzhiyun
4146*4882a593Smuzhiyun if (reducing_redundancy) {
4147*4882a593Smuzhiyun if (bctl->flags & BTRFS_BALANCE_FORCE) {
4148*4882a593Smuzhiyun btrfs_info(fs_info,
4149*4882a593Smuzhiyun "balance: force reducing metadata redundancy");
4150*4882a593Smuzhiyun } else {
4151*4882a593Smuzhiyun btrfs_err(fs_info,
4152*4882a593Smuzhiyun "balance: reduces metadata redundancy, use --force if you want this");
4153*4882a593Smuzhiyun ret = -EINVAL;
4154*4882a593Smuzhiyun goto out;
4155*4882a593Smuzhiyun }
4156*4882a593Smuzhiyun }
4157*4882a593Smuzhiyun
4158*4882a593Smuzhiyun if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4159*4882a593Smuzhiyun btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4160*4882a593Smuzhiyun btrfs_warn(fs_info,
4161*4882a593Smuzhiyun "balance: metadata profile %s has lower redundancy than data profile %s",
4162*4882a593Smuzhiyun btrfs_bg_type_to_raid_name(meta_target),
4163*4882a593Smuzhiyun btrfs_bg_type_to_raid_name(data_target));
4164*4882a593Smuzhiyun }
4165*4882a593Smuzhiyun
4166*4882a593Smuzhiyun if (fs_info->send_in_progress) {
4167*4882a593Smuzhiyun btrfs_warn_rl(fs_info,
4168*4882a593Smuzhiyun "cannot run balance while send operations are in progress (%d in progress)",
4169*4882a593Smuzhiyun fs_info->send_in_progress);
4170*4882a593Smuzhiyun ret = -EAGAIN;
4171*4882a593Smuzhiyun goto out;
4172*4882a593Smuzhiyun }
4173*4882a593Smuzhiyun
4174*4882a593Smuzhiyun ret = insert_balance_item(fs_info, bctl);
4175*4882a593Smuzhiyun if (ret && ret != -EEXIST)
4176*4882a593Smuzhiyun goto out;
4177*4882a593Smuzhiyun
4178*4882a593Smuzhiyun if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4179*4882a593Smuzhiyun BUG_ON(ret == -EEXIST);
4180*4882a593Smuzhiyun BUG_ON(fs_info->balance_ctl);
4181*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
4182*4882a593Smuzhiyun fs_info->balance_ctl = bctl;
4183*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
4184*4882a593Smuzhiyun } else {
4185*4882a593Smuzhiyun BUG_ON(ret != -EEXIST);
4186*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
4187*4882a593Smuzhiyun update_balance_args(bctl);
4188*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
4189*4882a593Smuzhiyun }
4190*4882a593Smuzhiyun
4191*4882a593Smuzhiyun ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4192*4882a593Smuzhiyun set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4193*4882a593Smuzhiyun describe_balance_start_or_resume(fs_info);
4194*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4195*4882a593Smuzhiyun
4196*4882a593Smuzhiyun ret = __btrfs_balance(fs_info);
4197*4882a593Smuzhiyun
4198*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4199*4882a593Smuzhiyun if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4200*4882a593Smuzhiyun btrfs_info(fs_info, "balance: paused");
4201*4882a593Smuzhiyun /*
4202*4882a593Smuzhiyun * Balance can be canceled by:
4203*4882a593Smuzhiyun *
4204*4882a593Smuzhiyun * - Regular cancel request
4205*4882a593Smuzhiyun * Then ret == -ECANCELED and balance_cancel_req > 0
4206*4882a593Smuzhiyun *
4207*4882a593Smuzhiyun * - Fatal signal to "btrfs" process
4208*4882a593Smuzhiyun * Either the signal caught by wait_reserve_ticket() and callers
4209*4882a593Smuzhiyun * got -EINTR, or caught by btrfs_should_cancel_balance() and
4210*4882a593Smuzhiyun * got -ECANCELED.
4211*4882a593Smuzhiyun * Either way, in this case balance_cancel_req = 0, and
4212*4882a593Smuzhiyun * ret == -EINTR or ret == -ECANCELED.
4213*4882a593Smuzhiyun *
4214*4882a593Smuzhiyun * So here we only check the return value to catch canceled balance.
4215*4882a593Smuzhiyun */
4216*4882a593Smuzhiyun else if (ret == -ECANCELED || ret == -EINTR)
4217*4882a593Smuzhiyun btrfs_info(fs_info, "balance: canceled");
4218*4882a593Smuzhiyun else
4219*4882a593Smuzhiyun btrfs_info(fs_info, "balance: ended with status: %d", ret);
4220*4882a593Smuzhiyun
4221*4882a593Smuzhiyun clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4222*4882a593Smuzhiyun
4223*4882a593Smuzhiyun if (bargs) {
4224*4882a593Smuzhiyun memset(bargs, 0, sizeof(*bargs));
4225*4882a593Smuzhiyun btrfs_update_ioctl_balance_args(fs_info, bargs);
4226*4882a593Smuzhiyun }
4227*4882a593Smuzhiyun
4228*4882a593Smuzhiyun if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4229*4882a593Smuzhiyun balance_need_close(fs_info)) {
4230*4882a593Smuzhiyun reset_balance_state(fs_info);
4231*4882a593Smuzhiyun btrfs_exclop_finish(fs_info);
4232*4882a593Smuzhiyun }
4233*4882a593Smuzhiyun
4234*4882a593Smuzhiyun wake_up(&fs_info->balance_wait_q);
4235*4882a593Smuzhiyun
4236*4882a593Smuzhiyun return ret;
4237*4882a593Smuzhiyun out:
4238*4882a593Smuzhiyun if (bctl->flags & BTRFS_BALANCE_RESUME)
4239*4882a593Smuzhiyun reset_balance_state(fs_info);
4240*4882a593Smuzhiyun else
4241*4882a593Smuzhiyun kfree(bctl);
4242*4882a593Smuzhiyun btrfs_exclop_finish(fs_info);
4243*4882a593Smuzhiyun
4244*4882a593Smuzhiyun return ret;
4245*4882a593Smuzhiyun }
4246*4882a593Smuzhiyun
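/* Kthread entry point used to resume a paused balance in the background. */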
4247*4882a593Smuzhiyun static int balance_kthread(void *data)
4248*4882a593Smuzhiyun {
4249*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = data;
4250*4882a593Smuzhiyun int ret = 0;
4251*4882a593Smuzhiyun
4252*4882a593Smuzhiyun sb_start_write(fs_info->sb);
4253*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4254*4882a593Smuzhiyun if (fs_info->balance_ctl)
4255*4882a593Smuzhiyun ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4256*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4257*4882a593Smuzhiyun sb_end_write(fs_info->sb);
4258*4882a593Smuzhiyun
4259*4882a593Smuzhiyun return ret;
4260*4882a593Smuzhiyun }
4261*4882a593Smuzhiyun
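/*
 * Resume a paused balance asynchronously by starting the balance kthread,
 * unless the skip_balance mount option was given.
 */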
4262*4882a593Smuzhiyun int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4263*4882a593Smuzhiyun {
4264*4882a593Smuzhiyun struct task_struct *tsk;
4265*4882a593Smuzhiyun
4266*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4267*4882a593Smuzhiyun if (!fs_info->balance_ctl) {
4268*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4269*4882a593Smuzhiyun return 0;
4270*4882a593Smuzhiyun }
4271*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4272*4882a593Smuzhiyun
4273*4882a593Smuzhiyun if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4274*4882a593Smuzhiyun btrfs_info(fs_info, "balance: resume skipped");
4275*4882a593Smuzhiyun return 0;
4276*4882a593Smuzhiyun }
4277*4882a593Smuzhiyun
4278*4882a593Smuzhiyun /*
4279*4882a593Smuzhiyun * A ro->rw remount sequence should continue with the paused balance
4280*4882a593Smuzhiyun * regardless of who paused it, the system or the user as of now, so set
4281*4882a593Smuzhiyun * the resume flag.
4282*4882a593Smuzhiyun */
4283*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
4284*4882a593Smuzhiyun fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4285*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
4286*4882a593Smuzhiyun
4287*4882a593Smuzhiyun tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4288*4882a593Smuzhiyun return PTR_ERR_OR_ZERO(tsk);
4289*4882a593Smuzhiyun }
4290*4882a593Smuzhiyun
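/*
 * Read the balance item from the tree root, if present, and recreate
 * fs_info->balance_ctl so that the paused balance can be resumed later.
 */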
4291*4882a593Smuzhiyun int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4292*4882a593Smuzhiyun {
4293*4882a593Smuzhiyun struct btrfs_balance_control *bctl;
4294*4882a593Smuzhiyun struct btrfs_balance_item *item;
4295*4882a593Smuzhiyun struct btrfs_disk_balance_args disk_bargs;
4296*4882a593Smuzhiyun struct btrfs_path *path;
4297*4882a593Smuzhiyun struct extent_buffer *leaf;
4298*4882a593Smuzhiyun struct btrfs_key key;
4299*4882a593Smuzhiyun int ret;
4300*4882a593Smuzhiyun
4301*4882a593Smuzhiyun path = btrfs_alloc_path();
4302*4882a593Smuzhiyun if (!path)
4303*4882a593Smuzhiyun return -ENOMEM;
4304*4882a593Smuzhiyun
4305*4882a593Smuzhiyun key.objectid = BTRFS_BALANCE_OBJECTID;
4306*4882a593Smuzhiyun key.type = BTRFS_TEMPORARY_ITEM_KEY;
4307*4882a593Smuzhiyun key.offset = 0;
4308*4882a593Smuzhiyun
4309*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4310*4882a593Smuzhiyun if (ret < 0)
4311*4882a593Smuzhiyun goto out;
4312*4882a593Smuzhiyun if (ret > 0) { /* ret = -ENOENT; */
4313*4882a593Smuzhiyun ret = 0;
4314*4882a593Smuzhiyun goto out;
4315*4882a593Smuzhiyun }
4316*4882a593Smuzhiyun
4317*4882a593Smuzhiyun bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4318*4882a593Smuzhiyun if (!bctl) {
4319*4882a593Smuzhiyun ret = -ENOMEM;
4320*4882a593Smuzhiyun goto out;
4321*4882a593Smuzhiyun }
4322*4882a593Smuzhiyun
4323*4882a593Smuzhiyun leaf = path->nodes[0];
4324*4882a593Smuzhiyun item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4325*4882a593Smuzhiyun
4326*4882a593Smuzhiyun bctl->flags = btrfs_balance_flags(leaf, item);
4327*4882a593Smuzhiyun bctl->flags |= BTRFS_BALANCE_RESUME;
4328*4882a593Smuzhiyun
4329*4882a593Smuzhiyun btrfs_balance_data(leaf, item, &disk_bargs);
4330*4882a593Smuzhiyun btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4331*4882a593Smuzhiyun btrfs_balance_meta(leaf, item, &disk_bargs);
4332*4882a593Smuzhiyun btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4333*4882a593Smuzhiyun btrfs_balance_sys(leaf, item, &disk_bargs);
4334*4882a593Smuzhiyun btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4335*4882a593Smuzhiyun
4336*4882a593Smuzhiyun /*
4337*4882a593Smuzhiyun * This should never happen, as the paused balance state is recovered
4338*4882a593Smuzhiyun * during mount without any chance for other exclusive ops to collide.
4339*4882a593Smuzhiyun *
4340*4882a593Smuzhiyun * This gives the exclusive op status to balance and keeps in paused
4341*4882a593Smuzhiyun * state until user intervention (cancel or umount). If the ownership
4342*4882a593Smuzhiyun * cannot be assigned, show a message but do not fail. The balance
4343*4882a593Smuzhiyun * is in a paused state and must have fs_info::balance_ctl properly
4344*4882a593Smuzhiyun * set up.
4345*4882a593Smuzhiyun */
4346*4882a593Smuzhiyun if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4347*4882a593Smuzhiyun btrfs_warn(fs_info,
4348*4882a593Smuzhiyun "balance: cannot set exclusive op status, resume manually");
4349*4882a593Smuzhiyun
4350*4882a593Smuzhiyun btrfs_release_path(path);
4351*4882a593Smuzhiyun
4352*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4353*4882a593Smuzhiyun BUG_ON(fs_info->balance_ctl);
4354*4882a593Smuzhiyun spin_lock(&fs_info->balance_lock);
4355*4882a593Smuzhiyun fs_info->balance_ctl = bctl;
4356*4882a593Smuzhiyun spin_unlock(&fs_info->balance_lock);
4357*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4358*4882a593Smuzhiyun out:
4359*4882a593Smuzhiyun btrfs_free_path(path);
4360*4882a593Smuzhiyun return ret;
4361*4882a593Smuzhiyun }
4362*4882a593Smuzhiyun
4363*4882a593Smuzhiyun int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4364*4882a593Smuzhiyun {
4365*4882a593Smuzhiyun int ret = 0;
4366*4882a593Smuzhiyun
4367*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4368*4882a593Smuzhiyun if (!fs_info->balance_ctl) {
4369*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4370*4882a593Smuzhiyun return -ENOTCONN;
4371*4882a593Smuzhiyun }
4372*4882a593Smuzhiyun
4373*4882a593Smuzhiyun if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4374*4882a593Smuzhiyun atomic_inc(&fs_info->balance_pause_req);
4375*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4376*4882a593Smuzhiyun
4377*4882a593Smuzhiyun wait_event(fs_info->balance_wait_q,
4378*4882a593Smuzhiyun !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4379*4882a593Smuzhiyun
4380*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4381*4882a593Smuzhiyun /* we are good with balance_ctl ripped off from under us */
4382*4882a593Smuzhiyun BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4383*4882a593Smuzhiyun atomic_dec(&fs_info->balance_pause_req);
4384*4882a593Smuzhiyun } else {
4385*4882a593Smuzhiyun ret = -ENOTCONN;
4386*4882a593Smuzhiyun }
4387*4882a593Smuzhiyun
4388*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4389*4882a593Smuzhiyun return ret;
4390*4882a593Smuzhiyun }
4391*4882a593Smuzhiyun
4392*4882a593Smuzhiyun int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4393*4882a593Smuzhiyun {
4394*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4395*4882a593Smuzhiyun if (!fs_info->balance_ctl) {
4396*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4397*4882a593Smuzhiyun return -ENOTCONN;
4398*4882a593Smuzhiyun }
4399*4882a593Smuzhiyun
4400*4882a593Smuzhiyun /*
4401*4882a593Smuzhiyun * A paused balance with the item stored on disk can be resumed at
4402*4882a593Smuzhiyun * mount time if the mount is read-write. Otherwise it's still paused
4403*4882a593Smuzhiyun * and we must not allow cancelling as it deletes the item.
4404*4882a593Smuzhiyun */
4405*4882a593Smuzhiyun if (sb_rdonly(fs_info->sb)) {
4406*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4407*4882a593Smuzhiyun return -EROFS;
4408*4882a593Smuzhiyun }
4409*4882a593Smuzhiyun
4410*4882a593Smuzhiyun atomic_inc(&fs_info->balance_cancel_req);
4411*4882a593Smuzhiyun /*
4412*4882a593Smuzhiyun * if we are running just wait and return, balance item is
4413*4882a593Smuzhiyun * deleted in btrfs_balance in this case
4414*4882a593Smuzhiyun */
4415*4882a593Smuzhiyun if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4416*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4417*4882a593Smuzhiyun wait_event(fs_info->balance_wait_q,
4418*4882a593Smuzhiyun !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4419*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4420*4882a593Smuzhiyun } else {
4421*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4422*4882a593Smuzhiyun /*
4423*4882a593Smuzhiyun * Lock released to allow other waiters to continue, we'll
4424*4882a593Smuzhiyun * reexamine the status again.
4425*4882a593Smuzhiyun */
4426*4882a593Smuzhiyun mutex_lock(&fs_info->balance_mutex);
4427*4882a593Smuzhiyun
4428*4882a593Smuzhiyun if (fs_info->balance_ctl) {
4429*4882a593Smuzhiyun reset_balance_state(fs_info);
4430*4882a593Smuzhiyun btrfs_exclop_finish(fs_info);
4431*4882a593Smuzhiyun btrfs_info(fs_info, "balance: canceled");
4432*4882a593Smuzhiyun }
4433*4882a593Smuzhiyun }
4434*4882a593Smuzhiyun
4435*4882a593Smuzhiyun BUG_ON(fs_info->balance_ctl ||
4436*4882a593Smuzhiyun test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4437*4882a593Smuzhiyun atomic_dec(&fs_info->balance_cancel_req);
4438*4882a593Smuzhiyun mutex_unlock(&fs_info->balance_mutex);
4439*4882a593Smuzhiyun return 0;
4440*4882a593Smuzhiyun }
4441*4882a593Smuzhiyun
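/*
 * Scan all root items and insert their subvolume and received-subvolume
 * UUIDs into the UUID tree.
 */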
4442*4882a593Smuzhiyun int btrfs_uuid_scan_kthread(void *data)
4443*4882a593Smuzhiyun {
4444*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = data;
4445*4882a593Smuzhiyun struct btrfs_root *root = fs_info->tree_root;
4446*4882a593Smuzhiyun struct btrfs_key key;
4447*4882a593Smuzhiyun struct btrfs_path *path = NULL;
4448*4882a593Smuzhiyun int ret = 0;
4449*4882a593Smuzhiyun struct extent_buffer *eb;
4450*4882a593Smuzhiyun int slot;
4451*4882a593Smuzhiyun struct btrfs_root_item root_item;
4452*4882a593Smuzhiyun u32 item_size;
4453*4882a593Smuzhiyun struct btrfs_trans_handle *trans = NULL;
4454*4882a593Smuzhiyun bool closing = false;
4455*4882a593Smuzhiyun
4456*4882a593Smuzhiyun path = btrfs_alloc_path();
4457*4882a593Smuzhiyun if (!path) {
4458*4882a593Smuzhiyun ret = -ENOMEM;
4459*4882a593Smuzhiyun goto out;
4460*4882a593Smuzhiyun }
4461*4882a593Smuzhiyun
4462*4882a593Smuzhiyun key.objectid = 0;
4463*4882a593Smuzhiyun key.type = BTRFS_ROOT_ITEM_KEY;
4464*4882a593Smuzhiyun key.offset = 0;
4465*4882a593Smuzhiyun
4466*4882a593Smuzhiyun while (1) {
4467*4882a593Smuzhiyun if (btrfs_fs_closing(fs_info)) {
4468*4882a593Smuzhiyun closing = true;
4469*4882a593Smuzhiyun break;
4470*4882a593Smuzhiyun }
4471*4882a593Smuzhiyun ret = btrfs_search_forward(root, &key, path,
4472*4882a593Smuzhiyun BTRFS_OLDEST_GENERATION);
4473*4882a593Smuzhiyun if (ret) {
4474*4882a593Smuzhiyun if (ret > 0)
4475*4882a593Smuzhiyun ret = 0;
4476*4882a593Smuzhiyun break;
4477*4882a593Smuzhiyun }
4478*4882a593Smuzhiyun
4479*4882a593Smuzhiyun if (key.type != BTRFS_ROOT_ITEM_KEY ||
4480*4882a593Smuzhiyun (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4481*4882a593Smuzhiyun key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4482*4882a593Smuzhiyun key.objectid > BTRFS_LAST_FREE_OBJECTID)
4483*4882a593Smuzhiyun goto skip;
4484*4882a593Smuzhiyun
4485*4882a593Smuzhiyun eb = path->nodes[0];
4486*4882a593Smuzhiyun slot = path->slots[0];
4487*4882a593Smuzhiyun item_size = btrfs_item_size_nr(eb, slot);
4488*4882a593Smuzhiyun if (item_size < sizeof(root_item))
4489*4882a593Smuzhiyun goto skip;
4490*4882a593Smuzhiyun
4491*4882a593Smuzhiyun read_extent_buffer(eb, &root_item,
4492*4882a593Smuzhiyun btrfs_item_ptr_offset(eb, slot),
4493*4882a593Smuzhiyun (int)sizeof(root_item));
4494*4882a593Smuzhiyun if (btrfs_root_refs(&root_item) == 0)
4495*4882a593Smuzhiyun goto skip;
4496*4882a593Smuzhiyun
4497*4882a593Smuzhiyun if (!btrfs_is_empty_uuid(root_item.uuid) ||
4498*4882a593Smuzhiyun !btrfs_is_empty_uuid(root_item.received_uuid)) {
4499*4882a593Smuzhiyun if (trans)
4500*4882a593Smuzhiyun goto update_tree;
4501*4882a593Smuzhiyun
4502*4882a593Smuzhiyun btrfs_release_path(path);
4503*4882a593Smuzhiyun /*
4504*4882a593Smuzhiyun * 1 - subvol uuid item
4505*4882a593Smuzhiyun * 1 - received_subvol uuid item
4506*4882a593Smuzhiyun */
4507*4882a593Smuzhiyun trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4508*4882a593Smuzhiyun if (IS_ERR(trans)) {
4509*4882a593Smuzhiyun ret = PTR_ERR(trans);
4510*4882a593Smuzhiyun break;
4511*4882a593Smuzhiyun }
4512*4882a593Smuzhiyun continue;
4513*4882a593Smuzhiyun } else {
4514*4882a593Smuzhiyun goto skip;
4515*4882a593Smuzhiyun }
4516*4882a593Smuzhiyun update_tree:
4517*4882a593Smuzhiyun btrfs_release_path(path);
4518*4882a593Smuzhiyun if (!btrfs_is_empty_uuid(root_item.uuid)) {
4519*4882a593Smuzhiyun ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4520*4882a593Smuzhiyun BTRFS_UUID_KEY_SUBVOL,
4521*4882a593Smuzhiyun key.objectid);
4522*4882a593Smuzhiyun if (ret < 0) {
4523*4882a593Smuzhiyun btrfs_warn(fs_info, "uuid_tree_add failed %d",
4524*4882a593Smuzhiyun ret);
4525*4882a593Smuzhiyun break;
4526*4882a593Smuzhiyun }
4527*4882a593Smuzhiyun }
4528*4882a593Smuzhiyun
4529*4882a593Smuzhiyun if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4530*4882a593Smuzhiyun ret = btrfs_uuid_tree_add(trans,
4531*4882a593Smuzhiyun root_item.received_uuid,
4532*4882a593Smuzhiyun BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4533*4882a593Smuzhiyun key.objectid);
4534*4882a593Smuzhiyun if (ret < 0) {
4535*4882a593Smuzhiyun btrfs_warn(fs_info, "uuid_tree_add failed %d",
4536*4882a593Smuzhiyun ret);
4537*4882a593Smuzhiyun break;
4538*4882a593Smuzhiyun }
4539*4882a593Smuzhiyun }
4540*4882a593Smuzhiyun
4541*4882a593Smuzhiyun skip:
4542*4882a593Smuzhiyun btrfs_release_path(path);
4543*4882a593Smuzhiyun if (trans) {
4544*4882a593Smuzhiyun ret = btrfs_end_transaction(trans);
4545*4882a593Smuzhiyun trans = NULL;
4546*4882a593Smuzhiyun if (ret)
4547*4882a593Smuzhiyun break;
4548*4882a593Smuzhiyun }
4549*4882a593Smuzhiyun
4550*4882a593Smuzhiyun if (key.offset < (u64)-1) {
4551*4882a593Smuzhiyun key.offset++;
4552*4882a593Smuzhiyun } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4553*4882a593Smuzhiyun key.offset = 0;
4554*4882a593Smuzhiyun key.type = BTRFS_ROOT_ITEM_KEY;
4555*4882a593Smuzhiyun } else if (key.objectid < (u64)-1) {
4556*4882a593Smuzhiyun key.offset = 0;
4557*4882a593Smuzhiyun key.type = BTRFS_ROOT_ITEM_KEY;
4558*4882a593Smuzhiyun key.objectid++;
4559*4882a593Smuzhiyun } else {
4560*4882a593Smuzhiyun break;
4561*4882a593Smuzhiyun }
4562*4882a593Smuzhiyun cond_resched();
4563*4882a593Smuzhiyun }
4564*4882a593Smuzhiyun
4565*4882a593Smuzhiyun out:
4566*4882a593Smuzhiyun btrfs_free_path(path);
4567*4882a593Smuzhiyun if (trans && !IS_ERR(trans))
4568*4882a593Smuzhiyun btrfs_end_transaction(trans);
4569*4882a593Smuzhiyun if (ret)
4570*4882a593Smuzhiyun btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4571*4882a593Smuzhiyun else if (!closing)
4572*4882a593Smuzhiyun set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4573*4882a593Smuzhiyun up(&fs_info->uuid_tree_rescan_sem);
4574*4882a593Smuzhiyun return 0;
4575*4882a593Smuzhiyun }
4576*4882a593Smuzhiyun
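/*
 * Create the UUID tree and start the scan kthread that populates it from
 * the existing subvolumes.
 */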
4577*4882a593Smuzhiyun int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4578*4882a593Smuzhiyun {
4579*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
4580*4882a593Smuzhiyun struct btrfs_root *tree_root = fs_info->tree_root;
4581*4882a593Smuzhiyun struct btrfs_root *uuid_root;
4582*4882a593Smuzhiyun struct task_struct *task;
4583*4882a593Smuzhiyun int ret;
4584*4882a593Smuzhiyun
4585*4882a593Smuzhiyun /*
4586*4882a593Smuzhiyun * 1 - root node
4587*4882a593Smuzhiyun * 1 - root item
4588*4882a593Smuzhiyun */
4589*4882a593Smuzhiyun trans = btrfs_start_transaction(tree_root, 2);
4590*4882a593Smuzhiyun if (IS_ERR(trans))
4591*4882a593Smuzhiyun return PTR_ERR(trans);
4592*4882a593Smuzhiyun
4593*4882a593Smuzhiyun uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4594*4882a593Smuzhiyun if (IS_ERR(uuid_root)) {
4595*4882a593Smuzhiyun ret = PTR_ERR(uuid_root);
4596*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
4597*4882a593Smuzhiyun btrfs_end_transaction(trans);
4598*4882a593Smuzhiyun return ret;
4599*4882a593Smuzhiyun }
4600*4882a593Smuzhiyun
4601*4882a593Smuzhiyun fs_info->uuid_root = uuid_root;
4602*4882a593Smuzhiyun
4603*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
4604*4882a593Smuzhiyun if (ret)
4605*4882a593Smuzhiyun return ret;
4606*4882a593Smuzhiyun
4607*4882a593Smuzhiyun down(&fs_info->uuid_tree_rescan_sem);
4608*4882a593Smuzhiyun task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4609*4882a593Smuzhiyun if (IS_ERR(task)) {
4610*4882a593Smuzhiyun /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4611*4882a593Smuzhiyun btrfs_warn(fs_info, "failed to start uuid_scan task");
4612*4882a593Smuzhiyun up(&fs_info->uuid_tree_rescan_sem);
4613*4882a593Smuzhiyun return PTR_ERR(task);
4614*4882a593Smuzhiyun }
4615*4882a593Smuzhiyun
4616*4882a593Smuzhiyun return 0;
4617*4882a593Smuzhiyun }
4618*4882a593Smuzhiyun
4619*4882a593Smuzhiyun /*
4620*4882a593Smuzhiyun * Shrinking a device means finding all of the device extents past
4621*4882a593Smuzhiyun * the new size, and then following the back refs to the chunks.
4622*4882a593Smuzhiyun * The chunk relocation code actually frees the device extents.
4623*4882a593Smuzhiyun */
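/*
 * Rough outline of the implementation below: device extent items are walked
 * backwards from offset (u64)-1; any chunk whose device extent crosses the
 * new size is relocated, and the whole pass is retried once if some chunks
 * could not be relocated because of ENOSPC.
 */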
4624*4882a593Smuzhiyun int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4625*4882a593Smuzhiyun {
4626*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = device->fs_info;
4627*4882a593Smuzhiyun struct btrfs_root *root = fs_info->dev_root;
4628*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
4629*4882a593Smuzhiyun struct btrfs_dev_extent *dev_extent = NULL;
4630*4882a593Smuzhiyun struct btrfs_path *path;
4631*4882a593Smuzhiyun u64 length;
4632*4882a593Smuzhiyun u64 chunk_offset;
4633*4882a593Smuzhiyun int ret;
4634*4882a593Smuzhiyun int slot;
4635*4882a593Smuzhiyun int failed = 0;
4636*4882a593Smuzhiyun bool retried = false;
4637*4882a593Smuzhiyun struct extent_buffer *l;
4638*4882a593Smuzhiyun struct btrfs_key key;
4639*4882a593Smuzhiyun struct btrfs_super_block *super_copy = fs_info->super_copy;
4640*4882a593Smuzhiyun u64 old_total = btrfs_super_total_bytes(super_copy);
4641*4882a593Smuzhiyun u64 old_size = btrfs_device_get_total_bytes(device);
4642*4882a593Smuzhiyun u64 diff;
4643*4882a593Smuzhiyun u64 start;
4644*4882a593Smuzhiyun
4645*4882a593Smuzhiyun new_size = round_down(new_size, fs_info->sectorsize);
4646*4882a593Smuzhiyun start = new_size;
4647*4882a593Smuzhiyun diff = round_down(old_size - new_size, fs_info->sectorsize);
4648*4882a593Smuzhiyun
4649*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4650*4882a593Smuzhiyun return -EINVAL;
4651*4882a593Smuzhiyun
4652*4882a593Smuzhiyun path = btrfs_alloc_path();
4653*4882a593Smuzhiyun if (!path)
4654*4882a593Smuzhiyun return -ENOMEM;
4655*4882a593Smuzhiyun
4656*4882a593Smuzhiyun path->reada = READA_BACK;
4657*4882a593Smuzhiyun
4658*4882a593Smuzhiyun trans = btrfs_start_transaction(root, 0);
4659*4882a593Smuzhiyun if (IS_ERR(trans)) {
4660*4882a593Smuzhiyun btrfs_free_path(path);
4661*4882a593Smuzhiyun return PTR_ERR(trans);
4662*4882a593Smuzhiyun }
4663*4882a593Smuzhiyun
4664*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
4665*4882a593Smuzhiyun
4666*4882a593Smuzhiyun btrfs_device_set_total_bytes(device, new_size);
4667*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4668*4882a593Smuzhiyun device->fs_devices->total_rw_bytes -= diff;
4669*4882a593Smuzhiyun atomic64_sub(diff, &fs_info->free_chunk_space);
4670*4882a593Smuzhiyun }
4671*4882a593Smuzhiyun
4672*4882a593Smuzhiyun /*
4673*4882a593Smuzhiyun * Once the device's size has been set to the new size, ensure all
4674*4882a593Smuzhiyun * in-memory chunks are synced to disk so that the loop below sees them
4675*4882a593Smuzhiyun * and relocates them accordingly.
4676*4882a593Smuzhiyun */
4677*4882a593Smuzhiyun if (contains_pending_extent(device, &start, diff)) {
4678*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
4679*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
4680*4882a593Smuzhiyun if (ret)
4681*4882a593Smuzhiyun goto done;
4682*4882a593Smuzhiyun } else {
4683*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
4684*4882a593Smuzhiyun btrfs_end_transaction(trans);
4685*4882a593Smuzhiyun }
4686*4882a593Smuzhiyun
4687*4882a593Smuzhiyun again:
4688*4882a593Smuzhiyun key.objectid = device->devid;
4689*4882a593Smuzhiyun key.offset = (u64)-1;
4690*4882a593Smuzhiyun key.type = BTRFS_DEV_EXTENT_KEY;
4691*4882a593Smuzhiyun
4692*4882a593Smuzhiyun do {
4693*4882a593Smuzhiyun mutex_lock(&fs_info->delete_unused_bgs_mutex);
4694*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4695*4882a593Smuzhiyun if (ret < 0) {
4696*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4697*4882a593Smuzhiyun goto done;
4698*4882a593Smuzhiyun }
4699*4882a593Smuzhiyun
4700*4882a593Smuzhiyun ret = btrfs_previous_item(root, path, 0, key.type);
4701*4882a593Smuzhiyun if (ret)
4702*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4703*4882a593Smuzhiyun if (ret < 0)
4704*4882a593Smuzhiyun goto done;
4705*4882a593Smuzhiyun if (ret) {
4706*4882a593Smuzhiyun ret = 0;
4707*4882a593Smuzhiyun btrfs_release_path(path);
4708*4882a593Smuzhiyun break;
4709*4882a593Smuzhiyun }
4710*4882a593Smuzhiyun
4711*4882a593Smuzhiyun l = path->nodes[0];
4712*4882a593Smuzhiyun slot = path->slots[0];
4713*4882a593Smuzhiyun btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4714*4882a593Smuzhiyun
4715*4882a593Smuzhiyun if (key.objectid != device->devid) {
4716*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4717*4882a593Smuzhiyun btrfs_release_path(path);
4718*4882a593Smuzhiyun break;
4719*4882a593Smuzhiyun }
4720*4882a593Smuzhiyun
4721*4882a593Smuzhiyun dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4722*4882a593Smuzhiyun length = btrfs_dev_extent_length(l, dev_extent);
4723*4882a593Smuzhiyun
4724*4882a593Smuzhiyun if (key.offset + length <= new_size) {
4725*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4726*4882a593Smuzhiyun btrfs_release_path(path);
4727*4882a593Smuzhiyun break;
4728*4882a593Smuzhiyun }
4729*4882a593Smuzhiyun
4730*4882a593Smuzhiyun chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4731*4882a593Smuzhiyun btrfs_release_path(path);
4732*4882a593Smuzhiyun
4733*4882a593Smuzhiyun /*
4734*4882a593Smuzhiyun * We may be relocating the only data chunk we have,
4735*4882a593Smuzhiyun * which could potentially end up losing the data raid
4736*4882a593Smuzhiyun * profile, so let's allocate an empty one in
4737*4882a593Smuzhiyun * advance.
4738*4882a593Smuzhiyun */
4739*4882a593Smuzhiyun ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4740*4882a593Smuzhiyun if (ret < 0) {
4741*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4742*4882a593Smuzhiyun goto done;
4743*4882a593Smuzhiyun }
4744*4882a593Smuzhiyun
4745*4882a593Smuzhiyun ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4746*4882a593Smuzhiyun mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4747*4882a593Smuzhiyun if (ret == -ENOSPC) {
4748*4882a593Smuzhiyun failed++;
4749*4882a593Smuzhiyun } else if (ret) {
4750*4882a593Smuzhiyun if (ret == -ETXTBSY) {
4751*4882a593Smuzhiyun btrfs_warn(fs_info,
4752*4882a593Smuzhiyun "could not shrink block group %llu due to active swapfile",
4753*4882a593Smuzhiyun chunk_offset);
4754*4882a593Smuzhiyun }
4755*4882a593Smuzhiyun goto done;
4756*4882a593Smuzhiyun }
4757*4882a593Smuzhiyun } while (key.offset-- > 0);
4758*4882a593Smuzhiyun
4759*4882a593Smuzhiyun if (failed && !retried) {
4760*4882a593Smuzhiyun failed = 0;
4761*4882a593Smuzhiyun retried = true;
4762*4882a593Smuzhiyun goto again;
4763*4882a593Smuzhiyun } else if (failed && retried) {
4764*4882a593Smuzhiyun ret = -ENOSPC;
4765*4882a593Smuzhiyun goto done;
4766*4882a593Smuzhiyun }
4767*4882a593Smuzhiyun
4768*4882a593Smuzhiyun /* Shrinking succeeded, else we would be at "done". */
4769*4882a593Smuzhiyun trans = btrfs_start_transaction(root, 0);
4770*4882a593Smuzhiyun if (IS_ERR(trans)) {
4771*4882a593Smuzhiyun ret = PTR_ERR(trans);
4772*4882a593Smuzhiyun goto done;
4773*4882a593Smuzhiyun }
4774*4882a593Smuzhiyun
4775*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
4776*4882a593Smuzhiyun /* Clear all state bits beyond the shrunk device size */
4777*4882a593Smuzhiyun clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4778*4882a593Smuzhiyun CHUNK_STATE_MASK);
4779*4882a593Smuzhiyun
4780*4882a593Smuzhiyun btrfs_device_set_disk_total_bytes(device, new_size);
4781*4882a593Smuzhiyun if (list_empty(&device->post_commit_list))
4782*4882a593Smuzhiyun list_add_tail(&device->post_commit_list,
4783*4882a593Smuzhiyun &trans->transaction->dev_update_list);
4784*4882a593Smuzhiyun
4785*4882a593Smuzhiyun WARN_ON(diff > old_total);
4786*4882a593Smuzhiyun btrfs_set_super_total_bytes(super_copy,
4787*4882a593Smuzhiyun round_down(old_total - diff, fs_info->sectorsize));
4788*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
4789*4882a593Smuzhiyun
4790*4882a593Smuzhiyun /* Now btrfs_update_device() will change the on-disk size. */
4791*4882a593Smuzhiyun ret = btrfs_update_device(trans, device);
4792*4882a593Smuzhiyun if (ret < 0) {
4793*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
4794*4882a593Smuzhiyun btrfs_end_transaction(trans);
4795*4882a593Smuzhiyun } else {
4796*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
4797*4882a593Smuzhiyun }
4798*4882a593Smuzhiyun done:
4799*4882a593Smuzhiyun btrfs_free_path(path);
4800*4882a593Smuzhiyun if (ret) {
4801*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
4802*4882a593Smuzhiyun btrfs_device_set_total_bytes(device, old_size);
4803*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4804*4882a593Smuzhiyun device->fs_devices->total_rw_bytes += diff;
4805*4882a593Smuzhiyun atomic64_add(diff, &fs_info->free_chunk_space);
4806*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
4807*4882a593Smuzhiyun }
4808*4882a593Smuzhiyun return ret;
4809*4882a593Smuzhiyun }
4810*4882a593Smuzhiyun
4811*4882a593Smuzhiyun static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4812*4882a593Smuzhiyun struct btrfs_key *key,
4813*4882a593Smuzhiyun struct btrfs_chunk *chunk, int item_size)
4814*4882a593Smuzhiyun {
4815*4882a593Smuzhiyun struct btrfs_super_block *super_copy = fs_info->super_copy;
4816*4882a593Smuzhiyun struct btrfs_disk_key disk_key;
4817*4882a593Smuzhiyun u32 array_size;
4818*4882a593Smuzhiyun u8 *ptr;
4819*4882a593Smuzhiyun
4820*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
4821*4882a593Smuzhiyun array_size = btrfs_super_sys_array_size(super_copy);
4822*4882a593Smuzhiyun if (array_size + item_size + sizeof(disk_key)
4823*4882a593Smuzhiyun > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4824*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
4825*4882a593Smuzhiyun return -EFBIG;
4826*4882a593Smuzhiyun }
4827*4882a593Smuzhiyun
4828*4882a593Smuzhiyun ptr = super_copy->sys_chunk_array + array_size;
4829*4882a593Smuzhiyun btrfs_cpu_key_to_disk(&disk_key, key);
4830*4882a593Smuzhiyun memcpy(ptr, &disk_key, sizeof(disk_key));
4831*4882a593Smuzhiyun ptr += sizeof(disk_key);
4832*4882a593Smuzhiyun memcpy(ptr, chunk, item_size);
4833*4882a593Smuzhiyun item_size += sizeof(disk_key);
4834*4882a593Smuzhiyun btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4835*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
4836*4882a593Smuzhiyun
4837*4882a593Smuzhiyun return 0;
4838*4882a593Smuzhiyun }
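/*
 * Illustrative layout of super_copy->sys_chunk_array after the copy above:
 *
 *   [disk_key 0][chunk item 0][disk_key 1][chunk item 1]...
 *
 * Each appended entry grows sys_array_size by sizeof(disk_key) + item_size,
 * and -EFBIG is returned once BTRFS_SYSTEM_CHUNK_ARRAY_SIZE would be
 * exceeded.
 */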
4839*4882a593Smuzhiyun
4840*4882a593Smuzhiyun /*
4841*4882a593Smuzhiyun * Sort the devices in descending order by max_avail, then total_avail.
4842*4882a593Smuzhiyun */
4843*4882a593Smuzhiyun static int btrfs_cmp_device_info(const void *a, const void *b)
4844*4882a593Smuzhiyun {
4845*4882a593Smuzhiyun const struct btrfs_device_info *di_a = a;
4846*4882a593Smuzhiyun const struct btrfs_device_info *di_b = b;
4847*4882a593Smuzhiyun
4848*4882a593Smuzhiyun if (di_a->max_avail > di_b->max_avail)
4849*4882a593Smuzhiyun return -1;
4850*4882a593Smuzhiyun if (di_a->max_avail < di_b->max_avail)
4851*4882a593Smuzhiyun return 1;
4852*4882a593Smuzhiyun if (di_a->total_avail > di_b->total_avail)
4853*4882a593Smuzhiyun return -1;
4854*4882a593Smuzhiyun if (di_a->total_avail < di_b->total_avail)
4855*4882a593Smuzhiyun return 1;
4856*4882a593Smuzhiyun return 0;
4857*4882a593Smuzhiyun }
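/*
 * For example, devices with (max_avail, total_avail) of (10G, 50G),
 * (10G, 80G) and (5G, 100G) sort to (10G, 80G), (10G, 50G), (5G, 100G):
 * max_avail is the primary key and total_avail only breaks ties.
 */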
4858*4882a593Smuzhiyun
4859*4882a593Smuzhiyun static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4860*4882a593Smuzhiyun {
4861*4882a593Smuzhiyun if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4862*4882a593Smuzhiyun return;
4863*4882a593Smuzhiyun
4864*4882a593Smuzhiyun btrfs_set_fs_incompat(info, RAID56);
4865*4882a593Smuzhiyun }
4866*4882a593Smuzhiyun
4867*4882a593Smuzhiyun static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4868*4882a593Smuzhiyun {
4869*4882a593Smuzhiyun if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4870*4882a593Smuzhiyun return;
4871*4882a593Smuzhiyun
4872*4882a593Smuzhiyun btrfs_set_fs_incompat(info, RAID1C34);
4873*4882a593Smuzhiyun }
4874*4882a593Smuzhiyun
4875*4882a593Smuzhiyun /*
4876*4882a593Smuzhiyun * Structure used internally by the btrfs_alloc_chunk() function.
4877*4882a593Smuzhiyun * Wraps the needed parameters.
4878*4882a593Smuzhiyun */
4879*4882a593Smuzhiyun struct alloc_chunk_ctl {
4880*4882a593Smuzhiyun u64 start;
4881*4882a593Smuzhiyun u64 type;
4882*4882a593Smuzhiyun /* Total number of stripes to allocate */
4883*4882a593Smuzhiyun int num_stripes;
4884*4882a593Smuzhiyun /* sub_stripes info for map */
4885*4882a593Smuzhiyun int sub_stripes;
4886*4882a593Smuzhiyun /* Stripes per device */
4887*4882a593Smuzhiyun int dev_stripes;
4888*4882a593Smuzhiyun /* Maximum number of devices to use */
4889*4882a593Smuzhiyun int devs_max;
4890*4882a593Smuzhiyun /* Minimum number of devices to use */
4891*4882a593Smuzhiyun int devs_min;
4892*4882a593Smuzhiyun /* ndevs has to be a multiple of this */
4893*4882a593Smuzhiyun int devs_increment;
4894*4882a593Smuzhiyun /* Number of copies */
4895*4882a593Smuzhiyun int ncopies;
4896*4882a593Smuzhiyun /* Number of stripes worth of bytes to store parity information */
4897*4882a593Smuzhiyun int nparity;
4898*4882a593Smuzhiyun u64 max_stripe_size;
4899*4882a593Smuzhiyun u64 max_chunk_size;
4900*4882a593Smuzhiyun u64 dev_extent_min;
4901*4882a593Smuzhiyun u64 stripe_size;
4902*4882a593Smuzhiyun u64 chunk_size;
4903*4882a593Smuzhiyun int ndevs;
4904*4882a593Smuzhiyun };
4905*4882a593Smuzhiyun
4906*4882a593Smuzhiyun static void init_alloc_chunk_ctl_policy_regular(
4907*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices,
4908*4882a593Smuzhiyun struct alloc_chunk_ctl *ctl)
4909*4882a593Smuzhiyun {
4910*4882a593Smuzhiyun u64 type = ctl->type;
4911*4882a593Smuzhiyun
4912*4882a593Smuzhiyun if (type & BTRFS_BLOCK_GROUP_DATA) {
4913*4882a593Smuzhiyun ctl->max_stripe_size = SZ_1G;
4914*4882a593Smuzhiyun ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4915*4882a593Smuzhiyun } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4916*4882a593Smuzhiyun /* For larger filesystems, use larger metadata chunks */
4917*4882a593Smuzhiyun if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4918*4882a593Smuzhiyun ctl->max_stripe_size = SZ_1G;
4919*4882a593Smuzhiyun else
4920*4882a593Smuzhiyun ctl->max_stripe_size = SZ_256M;
4921*4882a593Smuzhiyun ctl->max_chunk_size = ctl->max_stripe_size;
4922*4882a593Smuzhiyun } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4923*4882a593Smuzhiyun ctl->max_stripe_size = SZ_32M;
4924*4882a593Smuzhiyun ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4925*4882a593Smuzhiyun ctl->devs_max = min_t(int, ctl->devs_max,
4926*4882a593Smuzhiyun BTRFS_MAX_DEVS_SYS_CHUNK);
4927*4882a593Smuzhiyun } else {
4928*4882a593Smuzhiyun BUG();
4929*4882a593Smuzhiyun }
4930*4882a593Smuzhiyun
4931*4882a593Smuzhiyun /* We don't want a chunk larger than 10% of writable space */
4932*4882a593Smuzhiyun ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4933*4882a593Smuzhiyun ctl->max_chunk_size);
4934*4882a593Smuzhiyun ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4935*4882a593Smuzhiyun }
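/*
 * A rough example (assuming BTRFS_MAX_DATA_CHUNK_SIZE is 10G): on a
 * filesystem with 100G of writable space a DATA chunk gets
 * max_stripe_size = 1G and max_chunk_size = min(10G, 10% of 100G) = 10G,
 * while on a 40G filesystem the 10% clamp lowers max_chunk_size to 4G.
 */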
4936*4882a593Smuzhiyun
4937*4882a593Smuzhiyun static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4938*4882a593Smuzhiyun struct alloc_chunk_ctl *ctl)
4939*4882a593Smuzhiyun {
4940*4882a593Smuzhiyun int index = btrfs_bg_flags_to_raid_index(ctl->type);
4941*4882a593Smuzhiyun
4942*4882a593Smuzhiyun ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4943*4882a593Smuzhiyun ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4944*4882a593Smuzhiyun ctl->devs_max = btrfs_raid_array[index].devs_max;
4945*4882a593Smuzhiyun if (!ctl->devs_max)
4946*4882a593Smuzhiyun ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
4947*4882a593Smuzhiyun ctl->devs_min = btrfs_raid_array[index].devs_min;
4948*4882a593Smuzhiyun ctl->devs_increment = btrfs_raid_array[index].devs_increment;
4949*4882a593Smuzhiyun ctl->ncopies = btrfs_raid_array[index].ncopies;
4950*4882a593Smuzhiyun ctl->nparity = btrfs_raid_array[index].nparity;
4951*4882a593Smuzhiyun ctl->ndevs = 0;
4952*4882a593Smuzhiyun
4953*4882a593Smuzhiyun switch (fs_devices->chunk_alloc_policy) {
4954*4882a593Smuzhiyun case BTRFS_CHUNK_ALLOC_REGULAR:
4955*4882a593Smuzhiyun init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
4956*4882a593Smuzhiyun break;
4957*4882a593Smuzhiyun default:
4958*4882a593Smuzhiyun BUG();
4959*4882a593Smuzhiyun }
4960*4882a593Smuzhiyun }
4961*4882a593Smuzhiyun
4962*4882a593Smuzhiyun static int gather_device_info(struct btrfs_fs_devices *fs_devices,
4963*4882a593Smuzhiyun struct alloc_chunk_ctl *ctl,
4964*4882a593Smuzhiyun struct btrfs_device_info *devices_info)
4965*4882a593Smuzhiyun {
4966*4882a593Smuzhiyun struct btrfs_fs_info *info = fs_devices->fs_info;
4967*4882a593Smuzhiyun struct btrfs_device *device;
4968*4882a593Smuzhiyun u64 total_avail;
4969*4882a593Smuzhiyun u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
4970*4882a593Smuzhiyun int ret;
4971*4882a593Smuzhiyun int ndevs = 0;
4972*4882a593Smuzhiyun u64 max_avail;
4973*4882a593Smuzhiyun u64 dev_offset;
4974*4882a593Smuzhiyun
4975*4882a593Smuzhiyun /*
4976*4882a593Smuzhiyun * in the first pass through the devices list, we gather information
4977*4882a593Smuzhiyun * about the available holes on each device.
4978*4882a593Smuzhiyun */
4979*4882a593Smuzhiyun list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4980*4882a593Smuzhiyun if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4981*4882a593Smuzhiyun WARN(1, KERN_ERR
4982*4882a593Smuzhiyun "BTRFS: read-only device in alloc_list\n");
4983*4882a593Smuzhiyun continue;
4984*4882a593Smuzhiyun }
4985*4882a593Smuzhiyun
4986*4882a593Smuzhiyun if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4987*4882a593Smuzhiyun &device->dev_state) ||
4988*4882a593Smuzhiyun test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4989*4882a593Smuzhiyun continue;
4990*4882a593Smuzhiyun
4991*4882a593Smuzhiyun if (device->total_bytes > device->bytes_used)
4992*4882a593Smuzhiyun total_avail = device->total_bytes - device->bytes_used;
4993*4882a593Smuzhiyun else
4994*4882a593Smuzhiyun total_avail = 0;
4995*4882a593Smuzhiyun
4996*4882a593Smuzhiyun /* If there is no space on this device, skip it. */
4997*4882a593Smuzhiyun if (total_avail < ctl->dev_extent_min)
4998*4882a593Smuzhiyun continue;
4999*4882a593Smuzhiyun
5000*4882a593Smuzhiyun ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5001*4882a593Smuzhiyun &max_avail);
5002*4882a593Smuzhiyun if (ret && ret != -ENOSPC)
5003*4882a593Smuzhiyun return ret;
5004*4882a593Smuzhiyun
5005*4882a593Smuzhiyun if (ret == 0)
5006*4882a593Smuzhiyun max_avail = dev_extent_want;
5007*4882a593Smuzhiyun
5008*4882a593Smuzhiyun if (max_avail < ctl->dev_extent_min) {
5009*4882a593Smuzhiyun if (btrfs_test_opt(info, ENOSPC_DEBUG))
5010*4882a593Smuzhiyun btrfs_debug(info,
5011*4882a593Smuzhiyun "%s: devid %llu has no free space, have=%llu want=%llu",
5012*4882a593Smuzhiyun __func__, device->devid, max_avail,
5013*4882a593Smuzhiyun ctl->dev_extent_min);
5014*4882a593Smuzhiyun continue;
5015*4882a593Smuzhiyun }
5016*4882a593Smuzhiyun
5017*4882a593Smuzhiyun if (ndevs == fs_devices->rw_devices) {
5018*4882a593Smuzhiyun WARN(1, "%s: found more than %llu devices\n",
5019*4882a593Smuzhiyun __func__, fs_devices->rw_devices);
5020*4882a593Smuzhiyun break;
5021*4882a593Smuzhiyun }
5022*4882a593Smuzhiyun devices_info[ndevs].dev_offset = dev_offset;
5023*4882a593Smuzhiyun devices_info[ndevs].max_avail = max_avail;
5024*4882a593Smuzhiyun devices_info[ndevs].total_avail = total_avail;
5025*4882a593Smuzhiyun devices_info[ndevs].dev = device;
5026*4882a593Smuzhiyun ++ndevs;
5027*4882a593Smuzhiyun }
5028*4882a593Smuzhiyun ctl->ndevs = ndevs;
5029*4882a593Smuzhiyun
5030*4882a593Smuzhiyun /*
5031*4882a593Smuzhiyun * now sort the devices by hole size / available space
5032*4882a593Smuzhiyun */
5033*4882a593Smuzhiyun sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5034*4882a593Smuzhiyun btrfs_cmp_device_info, NULL);
5035*4882a593Smuzhiyun
5036*4882a593Smuzhiyun return 0;
5037*4882a593Smuzhiyun }
5038*4882a593Smuzhiyun
5039*4882a593Smuzhiyun static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5040*4882a593Smuzhiyun struct btrfs_device_info *devices_info)
5041*4882a593Smuzhiyun {
5042*4882a593Smuzhiyun /* Number of stripes that count for block group size */
5043*4882a593Smuzhiyun int data_stripes;
5044*4882a593Smuzhiyun
5045*4882a593Smuzhiyun /*
5046*4882a593Smuzhiyun * The primary goal is to maximize the number of stripes, so use as
5047*4882a593Smuzhiyun * many devices as possible, even if the stripes are not maximum sized.
5048*4882a593Smuzhiyun *
5049*4882a593Smuzhiyun * The DUP profile stores more than one stripe per device, the
5050*4882a593Smuzhiyun * max_avail is the total size so we have to adjust.
5051*4882a593Smuzhiyun */
5052*4882a593Smuzhiyun ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5053*4882a593Smuzhiyun ctl->dev_stripes);
5054*4882a593Smuzhiyun ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5055*4882a593Smuzhiyun
5056*4882a593Smuzhiyun /* This will have to be fixed for RAID1 and RAID10 over more drives */
5057*4882a593Smuzhiyun data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5058*4882a593Smuzhiyun
5059*4882a593Smuzhiyun /*
5060*4882a593Smuzhiyun * Use the number of data stripes to figure out how big this chunk is
5061*4882a593Smuzhiyun * really going to be in terms of logical address space, and compare
5062*4882a593Smuzhiyun * that answer with the max chunk size. If it's higher, we try to
5063*4882a593Smuzhiyun * reduce stripe_size.
5064*4882a593Smuzhiyun */
5065*4882a593Smuzhiyun if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5066*4882a593Smuzhiyun /*
5067*4882a593Smuzhiyun * Reduce stripe_size, round it up to a 16MB boundary again and
5068*4882a593Smuzhiyun * then use it, unless it ends up being even bigger than the
5069*4882a593Smuzhiyun * previous value we had already.
5070*4882a593Smuzhiyun */
5071*4882a593Smuzhiyun ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5072*4882a593Smuzhiyun data_stripes), SZ_16M),
5073*4882a593Smuzhiyun ctl->stripe_size);
5074*4882a593Smuzhiyun }
5075*4882a593Smuzhiyun
5076*4882a593Smuzhiyun /* Align to BTRFS_STRIPE_LEN */
5077*4882a593Smuzhiyun ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5078*4882a593Smuzhiyun ctl->chunk_size = ctl->stripe_size * data_stripes;
5079*4882a593Smuzhiyun
5080*4882a593Smuzhiyun return 0;
5081*4882a593Smuzhiyun }
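/*
 * Illustrative example (assuming a 10G data chunk limit): a RAID0 data
 * chunk over 20 devices that each offer a 1G hole starts with
 * stripe_size = 1G and data_stripes = 20; since 20G exceeds
 * max_chunk_size, stripe_size is reduced to round_up(10G / 20, 16M) = 512M
 * and chunk_size becomes 512M * 20 = 10G.
 */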
5082*4882a593Smuzhiyun
5083*4882a593Smuzhiyun static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5084*4882a593Smuzhiyun struct alloc_chunk_ctl *ctl,
5085*4882a593Smuzhiyun struct btrfs_device_info *devices_info)
5086*4882a593Smuzhiyun {
5087*4882a593Smuzhiyun struct btrfs_fs_info *info = fs_devices->fs_info;
5088*4882a593Smuzhiyun
5089*4882a593Smuzhiyun /*
5090*4882a593Smuzhiyun * Round down to number of usable stripes, devs_increment can be any
5091*4882a593Smuzhiyun * number so we can't use round_down() that requires power of 2, while
5092*4882a593Smuzhiyun * rounddown is safe.
5093*4882a593Smuzhiyun */
5094*4882a593Smuzhiyun ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5095*4882a593Smuzhiyun
5096*4882a593Smuzhiyun if (ctl->ndevs < ctl->devs_min) {
5097*4882a593Smuzhiyun if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5098*4882a593Smuzhiyun btrfs_debug(info,
5099*4882a593Smuzhiyun "%s: not enough devices with free space: have=%d minimum required=%d",
5100*4882a593Smuzhiyun __func__, ctl->ndevs, ctl->devs_min);
5101*4882a593Smuzhiyun }
5102*4882a593Smuzhiyun return -ENOSPC;
5103*4882a593Smuzhiyun }
5104*4882a593Smuzhiyun
5105*4882a593Smuzhiyun ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5106*4882a593Smuzhiyun
5107*4882a593Smuzhiyun switch (fs_devices->chunk_alloc_policy) {
5108*4882a593Smuzhiyun case BTRFS_CHUNK_ALLOC_REGULAR:
5109*4882a593Smuzhiyun return decide_stripe_size_regular(ctl, devices_info);
5110*4882a593Smuzhiyun default:
5111*4882a593Smuzhiyun BUG();
5112*4882a593Smuzhiyun }
5113*4882a593Smuzhiyun }
5114*4882a593Smuzhiyun
5115*4882a593Smuzhiyun static int create_chunk(struct btrfs_trans_handle *trans,
5116*4882a593Smuzhiyun struct alloc_chunk_ctl *ctl,
5117*4882a593Smuzhiyun struct btrfs_device_info *devices_info)
5118*4882a593Smuzhiyun {
5119*4882a593Smuzhiyun struct btrfs_fs_info *info = trans->fs_info;
5120*4882a593Smuzhiyun struct map_lookup *map = NULL;
5121*4882a593Smuzhiyun struct extent_map_tree *em_tree;
5122*4882a593Smuzhiyun struct extent_map *em;
5123*4882a593Smuzhiyun u64 start = ctl->start;
5124*4882a593Smuzhiyun u64 type = ctl->type;
5125*4882a593Smuzhiyun int ret;
5126*4882a593Smuzhiyun int i;
5127*4882a593Smuzhiyun int j;
5128*4882a593Smuzhiyun
5129*4882a593Smuzhiyun map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5130*4882a593Smuzhiyun if (!map)
5131*4882a593Smuzhiyun return -ENOMEM;
5132*4882a593Smuzhiyun map->num_stripes = ctl->num_stripes;
5133*4882a593Smuzhiyun
5134*4882a593Smuzhiyun for (i = 0; i < ctl->ndevs; ++i) {
5135*4882a593Smuzhiyun for (j = 0; j < ctl->dev_stripes; ++j) {
5136*4882a593Smuzhiyun int s = i * ctl->dev_stripes + j;
5137*4882a593Smuzhiyun map->stripes[s].dev = devices_info[i].dev;
5138*4882a593Smuzhiyun map->stripes[s].physical = devices_info[i].dev_offset +
5139*4882a593Smuzhiyun j * ctl->stripe_size;
5140*4882a593Smuzhiyun }
5141*4882a593Smuzhiyun }
5142*4882a593Smuzhiyun map->stripe_len = BTRFS_STRIPE_LEN;
5143*4882a593Smuzhiyun map->io_align = BTRFS_STRIPE_LEN;
5144*4882a593Smuzhiyun map->io_width = BTRFS_STRIPE_LEN;
5145*4882a593Smuzhiyun map->type = type;
5146*4882a593Smuzhiyun map->sub_stripes = ctl->sub_stripes;
5147*4882a593Smuzhiyun
5148*4882a593Smuzhiyun trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5149*4882a593Smuzhiyun
5150*4882a593Smuzhiyun em = alloc_extent_map();
5151*4882a593Smuzhiyun if (!em) {
5152*4882a593Smuzhiyun kfree(map);
5153*4882a593Smuzhiyun return -ENOMEM;
5154*4882a593Smuzhiyun }
5155*4882a593Smuzhiyun set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5156*4882a593Smuzhiyun em->map_lookup = map;
5157*4882a593Smuzhiyun em->start = start;
5158*4882a593Smuzhiyun em->len = ctl->chunk_size;
5159*4882a593Smuzhiyun em->block_start = 0;
5160*4882a593Smuzhiyun em->block_len = em->len;
5161*4882a593Smuzhiyun em->orig_block_len = ctl->stripe_size;
5162*4882a593Smuzhiyun
5163*4882a593Smuzhiyun em_tree = &info->mapping_tree;
5164*4882a593Smuzhiyun write_lock(&em_tree->lock);
5165*4882a593Smuzhiyun ret = add_extent_mapping(em_tree, em, 0);
5166*4882a593Smuzhiyun if (ret) {
5167*4882a593Smuzhiyun write_unlock(&em_tree->lock);
5168*4882a593Smuzhiyun free_extent_map(em);
5169*4882a593Smuzhiyun return ret;
5170*4882a593Smuzhiyun }
5171*4882a593Smuzhiyun write_unlock(&em_tree->lock);
5172*4882a593Smuzhiyun
5173*4882a593Smuzhiyun ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5174*4882a593Smuzhiyun if (ret)
5175*4882a593Smuzhiyun goto error_del_extent;
5176*4882a593Smuzhiyun
5177*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
5178*4882a593Smuzhiyun struct btrfs_device *dev = map->stripes[i].dev;
5179*4882a593Smuzhiyun
5180*4882a593Smuzhiyun btrfs_device_set_bytes_used(dev,
5181*4882a593Smuzhiyun dev->bytes_used + ctl->stripe_size);
5182*4882a593Smuzhiyun if (list_empty(&dev->post_commit_list))
5183*4882a593Smuzhiyun list_add_tail(&dev->post_commit_list,
5184*4882a593Smuzhiyun &trans->transaction->dev_update_list);
5185*4882a593Smuzhiyun }
5186*4882a593Smuzhiyun
5187*4882a593Smuzhiyun atomic64_sub(ctl->stripe_size * map->num_stripes,
5188*4882a593Smuzhiyun &info->free_chunk_space);
5189*4882a593Smuzhiyun
5190*4882a593Smuzhiyun free_extent_map(em);
5191*4882a593Smuzhiyun check_raid56_incompat_flag(info, type);
5192*4882a593Smuzhiyun check_raid1c34_incompat_flag(info, type);
5193*4882a593Smuzhiyun
5194*4882a593Smuzhiyun return 0;
5195*4882a593Smuzhiyun
5196*4882a593Smuzhiyun error_del_extent:
5197*4882a593Smuzhiyun write_lock(&em_tree->lock);
5198*4882a593Smuzhiyun remove_extent_mapping(em_tree, em);
5199*4882a593Smuzhiyun write_unlock(&em_tree->lock);
5200*4882a593Smuzhiyun
5201*4882a593Smuzhiyun /* One for our allocation */
5202*4882a593Smuzhiyun free_extent_map(em);
5203*4882a593Smuzhiyun /* One for the tree reference */
5204*4882a593Smuzhiyun free_extent_map(em);
5205*4882a593Smuzhiyun
5206*4882a593Smuzhiyun return ret;
5207*4882a593Smuzhiyun }
5208*4882a593Smuzhiyun
5209*4882a593Smuzhiyun int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5210*4882a593Smuzhiyun {
5211*4882a593Smuzhiyun struct btrfs_fs_info *info = trans->fs_info;
5212*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = info->fs_devices;
5213*4882a593Smuzhiyun struct btrfs_device_info *devices_info = NULL;
5214*4882a593Smuzhiyun struct alloc_chunk_ctl ctl;
5215*4882a593Smuzhiyun int ret;
5216*4882a593Smuzhiyun
5217*4882a593Smuzhiyun lockdep_assert_held(&info->chunk_mutex);
5218*4882a593Smuzhiyun
5219*4882a593Smuzhiyun if (!alloc_profile_is_valid(type, 0)) {
5220*4882a593Smuzhiyun ASSERT(0);
5221*4882a593Smuzhiyun return -EINVAL;
5222*4882a593Smuzhiyun }
5223*4882a593Smuzhiyun
5224*4882a593Smuzhiyun if (list_empty(&fs_devices->alloc_list)) {
5225*4882a593Smuzhiyun if (btrfs_test_opt(info, ENOSPC_DEBUG))
5226*4882a593Smuzhiyun btrfs_debug(info, "%s: no writable device", __func__);
5227*4882a593Smuzhiyun return -ENOSPC;
5228*4882a593Smuzhiyun }
5229*4882a593Smuzhiyun
5230*4882a593Smuzhiyun if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5231*4882a593Smuzhiyun btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5232*4882a593Smuzhiyun ASSERT(0);
5233*4882a593Smuzhiyun return -EINVAL;
5234*4882a593Smuzhiyun }
5235*4882a593Smuzhiyun
5236*4882a593Smuzhiyun ctl.start = find_next_chunk(info);
5237*4882a593Smuzhiyun ctl.type = type;
5238*4882a593Smuzhiyun init_alloc_chunk_ctl(fs_devices, &ctl);
5239*4882a593Smuzhiyun
5240*4882a593Smuzhiyun devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5241*4882a593Smuzhiyun GFP_NOFS);
5242*4882a593Smuzhiyun if (!devices_info)
5243*4882a593Smuzhiyun return -ENOMEM;
5244*4882a593Smuzhiyun
5245*4882a593Smuzhiyun ret = gather_device_info(fs_devices, &ctl, devices_info);
5246*4882a593Smuzhiyun if (ret < 0)
5247*4882a593Smuzhiyun goto out;
5248*4882a593Smuzhiyun
5249*4882a593Smuzhiyun ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5250*4882a593Smuzhiyun if (ret < 0)
5251*4882a593Smuzhiyun goto out;
5252*4882a593Smuzhiyun
5253*4882a593Smuzhiyun ret = create_chunk(trans, &ctl, devices_info);
5254*4882a593Smuzhiyun
5255*4882a593Smuzhiyun out:
5256*4882a593Smuzhiyun kfree(devices_info);
5257*4882a593Smuzhiyun return ret;
5258*4882a593Smuzhiyun }
5259*4882a593Smuzhiyun
5260*4882a593Smuzhiyun /*
5261*4882a593Smuzhiyun * Chunk allocation falls into two parts. The first part does work
5262*4882a593Smuzhiyun * that makes the new allocated chunk usable, but does not do any operation
5263*4882a593Smuzhiyun * that modifies the chunk tree. The second part does the work that
5264*4882a593Smuzhiyun * requires modifying the chunk tree. This division is important for the
5265*4882a593Smuzhiyun * bootstrap process of adding storage to a seed btrfs.
5266*4882a593Smuzhiyun */
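/*
 * In this file the first part is btrfs_alloc_chunk() above, which only sets
 * up the in-memory extent map and block group, while the second part is
 * btrfs_finish_chunk_alloc() below, which writes the dev extents, the chunk
 * item and, for SYSTEM chunks, the superblock's sys_chunk_array entry.
 */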
5267*4882a593Smuzhiyun int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5268*4882a593Smuzhiyun u64 chunk_offset, u64 chunk_size)
5269*4882a593Smuzhiyun {
5270*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
5271*4882a593Smuzhiyun struct btrfs_root *extent_root = fs_info->extent_root;
5272*4882a593Smuzhiyun struct btrfs_root *chunk_root = fs_info->chunk_root;
5273*4882a593Smuzhiyun struct btrfs_key key;
5274*4882a593Smuzhiyun struct btrfs_device *device;
5275*4882a593Smuzhiyun struct btrfs_chunk *chunk;
5276*4882a593Smuzhiyun struct btrfs_stripe *stripe;
5277*4882a593Smuzhiyun struct extent_map *em;
5278*4882a593Smuzhiyun struct map_lookup *map;
5279*4882a593Smuzhiyun size_t item_size;
5280*4882a593Smuzhiyun u64 dev_offset;
5281*4882a593Smuzhiyun u64 stripe_size;
5282*4882a593Smuzhiyun int i = 0;
5283*4882a593Smuzhiyun int ret = 0;
5284*4882a593Smuzhiyun
5285*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5286*4882a593Smuzhiyun if (IS_ERR(em))
5287*4882a593Smuzhiyun return PTR_ERR(em);
5288*4882a593Smuzhiyun
5289*4882a593Smuzhiyun map = em->map_lookup;
5290*4882a593Smuzhiyun item_size = btrfs_chunk_item_size(map->num_stripes);
5291*4882a593Smuzhiyun stripe_size = em->orig_block_len;
5292*4882a593Smuzhiyun
5293*4882a593Smuzhiyun chunk = kzalloc(item_size, GFP_NOFS);
5294*4882a593Smuzhiyun if (!chunk) {
5295*4882a593Smuzhiyun ret = -ENOMEM;
5296*4882a593Smuzhiyun goto out;
5297*4882a593Smuzhiyun }
5298*4882a593Smuzhiyun
5299*4882a593Smuzhiyun /*
5300*4882a593Smuzhiyun * Take the device list mutex to prevent races with the final phase of
5301*4882a593Smuzhiyun * a device replace operation that replaces the device object associated
5302*4882a593Smuzhiyun * with the map's stripes, because the device object's id can change
5303*4882a593Smuzhiyun * at any time during that final phase of the device replace operation
5304*4882a593Smuzhiyun * (dev-replace.c:btrfs_dev_replace_finishing()).
5305*4882a593Smuzhiyun */
5306*4882a593Smuzhiyun mutex_lock(&fs_info->fs_devices->device_list_mutex);
5307*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
5308*4882a593Smuzhiyun device = map->stripes[i].dev;
5309*4882a593Smuzhiyun dev_offset = map->stripes[i].physical;
5310*4882a593Smuzhiyun
5311*4882a593Smuzhiyun ret = btrfs_update_device(trans, device);
5312*4882a593Smuzhiyun if (ret)
5313*4882a593Smuzhiyun break;
5314*4882a593Smuzhiyun ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5315*4882a593Smuzhiyun dev_offset, stripe_size);
5316*4882a593Smuzhiyun if (ret)
5317*4882a593Smuzhiyun break;
5318*4882a593Smuzhiyun }
5319*4882a593Smuzhiyun if (ret) {
5320*4882a593Smuzhiyun mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5321*4882a593Smuzhiyun goto out;
5322*4882a593Smuzhiyun }
5323*4882a593Smuzhiyun
5324*4882a593Smuzhiyun stripe = &chunk->stripe;
5325*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
5326*4882a593Smuzhiyun device = map->stripes[i].dev;
5327*4882a593Smuzhiyun dev_offset = map->stripes[i].physical;
5328*4882a593Smuzhiyun
5329*4882a593Smuzhiyun btrfs_set_stack_stripe_devid(stripe, device->devid);
5330*4882a593Smuzhiyun btrfs_set_stack_stripe_offset(stripe, dev_offset);
5331*4882a593Smuzhiyun memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5332*4882a593Smuzhiyun stripe++;
5333*4882a593Smuzhiyun }
5334*4882a593Smuzhiyun mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5335*4882a593Smuzhiyun
5336*4882a593Smuzhiyun btrfs_set_stack_chunk_length(chunk, chunk_size);
5337*4882a593Smuzhiyun btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5338*4882a593Smuzhiyun btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5339*4882a593Smuzhiyun btrfs_set_stack_chunk_type(chunk, map->type);
5340*4882a593Smuzhiyun btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5341*4882a593Smuzhiyun btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5342*4882a593Smuzhiyun btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5343*4882a593Smuzhiyun btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5344*4882a593Smuzhiyun btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5345*4882a593Smuzhiyun
5346*4882a593Smuzhiyun key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5347*4882a593Smuzhiyun key.type = BTRFS_CHUNK_ITEM_KEY;
5348*4882a593Smuzhiyun key.offset = chunk_offset;
5349*4882a593Smuzhiyun
5350*4882a593Smuzhiyun ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5351*4882a593Smuzhiyun if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5352*4882a593Smuzhiyun /*
5353*4882a593Smuzhiyun * TODO: Cleanup of inserted chunk root in case of
5354*4882a593Smuzhiyun * failure.
5355*4882a593Smuzhiyun */
5356*4882a593Smuzhiyun ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5357*4882a593Smuzhiyun }
5358*4882a593Smuzhiyun
5359*4882a593Smuzhiyun out:
5360*4882a593Smuzhiyun kfree(chunk);
5361*4882a593Smuzhiyun free_extent_map(em);
5362*4882a593Smuzhiyun return ret;
5363*4882a593Smuzhiyun }
5364*4882a593Smuzhiyun
5365*4882a593Smuzhiyun static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5366*4882a593Smuzhiyun {
5367*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
5368*4882a593Smuzhiyun u64 alloc_profile;
5369*4882a593Smuzhiyun int ret;
5370*4882a593Smuzhiyun
5371*4882a593Smuzhiyun alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5372*4882a593Smuzhiyun ret = btrfs_alloc_chunk(trans, alloc_profile);
5373*4882a593Smuzhiyun if (ret)
5374*4882a593Smuzhiyun return ret;
5375*4882a593Smuzhiyun
5376*4882a593Smuzhiyun alloc_profile = btrfs_system_alloc_profile(fs_info);
5377*4882a593Smuzhiyun ret = btrfs_alloc_chunk(trans, alloc_profile);
5378*4882a593Smuzhiyun return ret;
5379*4882a593Smuzhiyun }
5380*4882a593Smuzhiyun
5381*4882a593Smuzhiyun static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5382*4882a593Smuzhiyun {
5383*4882a593Smuzhiyun const int index = btrfs_bg_flags_to_raid_index(map->type);
5384*4882a593Smuzhiyun
5385*4882a593Smuzhiyun return btrfs_raid_array[index].tolerated_failures;
5386*4882a593Smuzhiyun }
5387*4882a593Smuzhiyun
5388*4882a593Smuzhiyun int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5389*4882a593Smuzhiyun {
5390*4882a593Smuzhiyun struct extent_map *em;
5391*4882a593Smuzhiyun struct map_lookup *map;
5392*4882a593Smuzhiyun int readonly = 0;
5393*4882a593Smuzhiyun int miss_ndevs = 0;
5394*4882a593Smuzhiyun int i;
5395*4882a593Smuzhiyun
5396*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5397*4882a593Smuzhiyun if (IS_ERR(em))
5398*4882a593Smuzhiyun return 1;
5399*4882a593Smuzhiyun
5400*4882a593Smuzhiyun map = em->map_lookup;
5401*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
5402*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_MISSING,
5403*4882a593Smuzhiyun &map->stripes[i].dev->dev_state)) {
5404*4882a593Smuzhiyun miss_ndevs++;
5405*4882a593Smuzhiyun continue;
5406*4882a593Smuzhiyun }
5407*4882a593Smuzhiyun if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5408*4882a593Smuzhiyun &map->stripes[i].dev->dev_state)) {
5409*4882a593Smuzhiyun readonly = 1;
5410*4882a593Smuzhiyun goto end;
5411*4882a593Smuzhiyun }
5412*4882a593Smuzhiyun }
5413*4882a593Smuzhiyun
5414*4882a593Smuzhiyun /*
5415*4882a593Smuzhiyun * If the number of missing devices is larger than max errors,
5416*4882a593Smuzhiyun * we cannot write the data into that chunk successfully, so
5417*4882a593Smuzhiyun * set it readonly.
5418*4882a593Smuzhiyun */
5419*4882a593Smuzhiyun if (miss_ndevs > btrfs_chunk_max_errors(map))
5420*4882a593Smuzhiyun readonly = 1;
5421*4882a593Smuzhiyun end:
5422*4882a593Smuzhiyun free_extent_map(em);
5423*4882a593Smuzhiyun return readonly;
5424*4882a593Smuzhiyun }
5425*4882a593Smuzhiyun
5426*4882a593Smuzhiyun void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5427*4882a593Smuzhiyun {
5428*4882a593Smuzhiyun struct extent_map *em;
5429*4882a593Smuzhiyun
5430*4882a593Smuzhiyun while (1) {
5431*4882a593Smuzhiyun write_lock(&tree->lock);
5432*4882a593Smuzhiyun em = lookup_extent_mapping(tree, 0, (u64)-1);
5433*4882a593Smuzhiyun if (em)
5434*4882a593Smuzhiyun remove_extent_mapping(tree, em);
5435*4882a593Smuzhiyun write_unlock(&tree->lock);
5436*4882a593Smuzhiyun if (!em)
5437*4882a593Smuzhiyun break;
5438*4882a593Smuzhiyun /* once for us */
5439*4882a593Smuzhiyun free_extent_map(em);
5440*4882a593Smuzhiyun /* once for the tree */
5441*4882a593Smuzhiyun free_extent_map(em);
5442*4882a593Smuzhiyun }
5443*4882a593Smuzhiyun }
5444*4882a593Smuzhiyun
5445*4882a593Smuzhiyun int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5446*4882a593Smuzhiyun {
5447*4882a593Smuzhiyun struct extent_map *em;
5448*4882a593Smuzhiyun struct map_lookup *map;
5449*4882a593Smuzhiyun int ret;
5450*4882a593Smuzhiyun
5451*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, logical, len);
5452*4882a593Smuzhiyun if (IS_ERR(em))
5453*4882a593Smuzhiyun /*
5454*4882a593Smuzhiyun * We could return errors for these cases, but that could get
5455*4882a593Smuzhiyun * ugly and we'd probably end up doing the same thing anyway (do
5456*4882a593Smuzhiyun * nothing else and exit), so return 1 so the callers don't try
5457*4882a593Smuzhiyun * to use other copies.
5458*4882a593Smuzhiyun */
5459*4882a593Smuzhiyun return 1;
5460*4882a593Smuzhiyun
5461*4882a593Smuzhiyun map = em->map_lookup;
5462*4882a593Smuzhiyun if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5463*4882a593Smuzhiyun ret = map->num_stripes;
5464*4882a593Smuzhiyun else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5465*4882a593Smuzhiyun ret = map->sub_stripes;
5466*4882a593Smuzhiyun else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5467*4882a593Smuzhiyun ret = 2;
5468*4882a593Smuzhiyun else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5469*4882a593Smuzhiyun /*
5470*4882a593Smuzhiyun * There could be two corrupted data stripes, so we need
5471*4882a593Smuzhiyun * to retry in a loop in order to rebuild the correct data.
5472*4882a593Smuzhiyun *
5473*4882a593Smuzhiyun * Fail one stripe at a time on every retry except the
5474*4882a593Smuzhiyun * stripe under reconstruction.
5475*4882a593Smuzhiyun */
5476*4882a593Smuzhiyun ret = map->num_stripes;
5477*4882a593Smuzhiyun else
5478*4882a593Smuzhiyun ret = 1;
5479*4882a593Smuzhiyun free_extent_map(em);
5480*4882a593Smuzhiyun
5481*4882a593Smuzhiyun down_read(&fs_info->dev_replace.rwsem);
5482*4882a593Smuzhiyun if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5483*4882a593Smuzhiyun fs_info->dev_replace.tgtdev)
5484*4882a593Smuzhiyun ret++;
5485*4882a593Smuzhiyun up_read(&fs_info->dev_replace.rwsem);
5486*4882a593Smuzhiyun
5487*4882a593Smuzhiyun return ret;
5488*4882a593Smuzhiyun }
5489*4882a593Smuzhiyun
5490*4882a593Smuzhiyun unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5491*4882a593Smuzhiyun u64 logical)
5492*4882a593Smuzhiyun {
5493*4882a593Smuzhiyun struct extent_map *em;
5494*4882a593Smuzhiyun struct map_lookup *map;
5495*4882a593Smuzhiyun unsigned long len = fs_info->sectorsize;
5496*4882a593Smuzhiyun
5497*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, logical, len);
5498*4882a593Smuzhiyun
5499*4882a593Smuzhiyun if (!WARN_ON(IS_ERR(em))) {
5500*4882a593Smuzhiyun map = em->map_lookup;
5501*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5502*4882a593Smuzhiyun len = map->stripe_len * nr_data_stripes(map);
5503*4882a593Smuzhiyun free_extent_map(em);
5504*4882a593Smuzhiyun }
5505*4882a593Smuzhiyun return len;
5506*4882a593Smuzhiyun }
5507*4882a593Smuzhiyun
5508*4882a593Smuzhiyun int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5509*4882a593Smuzhiyun {
5510*4882a593Smuzhiyun struct extent_map *em;
5511*4882a593Smuzhiyun struct map_lookup *map;
5512*4882a593Smuzhiyun int ret = 0;
5513*4882a593Smuzhiyun
5514*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, logical, len);
5515*4882a593Smuzhiyun
5516*4882a593Smuzhiyun if (!WARN_ON(IS_ERR(em))) {
5517*4882a593Smuzhiyun map = em->map_lookup;
5518*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5519*4882a593Smuzhiyun ret = 1;
5520*4882a593Smuzhiyun free_extent_map(em);
5521*4882a593Smuzhiyun }
5522*4882a593Smuzhiyun return ret;
5523*4882a593Smuzhiyun }
5524*4882a593Smuzhiyun
5525*4882a593Smuzhiyun static int find_live_mirror(struct btrfs_fs_info *fs_info,
5526*4882a593Smuzhiyun struct map_lookup *map, int first,
5527*4882a593Smuzhiyun int dev_replace_is_ongoing)
5528*4882a593Smuzhiyun {
5529*4882a593Smuzhiyun int i;
5530*4882a593Smuzhiyun int num_stripes;
5531*4882a593Smuzhiyun int preferred_mirror;
5532*4882a593Smuzhiyun int tolerance;
5533*4882a593Smuzhiyun struct btrfs_device *srcdev;
5534*4882a593Smuzhiyun
5535*4882a593Smuzhiyun ASSERT((map->type &
5536*4882a593Smuzhiyun (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5537*4882a593Smuzhiyun
5538*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5539*4882a593Smuzhiyun num_stripes = map->sub_stripes;
5540*4882a593Smuzhiyun else
5541*4882a593Smuzhiyun num_stripes = map->num_stripes;
5542*4882a593Smuzhiyun
5543*4882a593Smuzhiyun preferred_mirror = first + current->pid % num_stripes;
5544*4882a593Smuzhiyun
5545*4882a593Smuzhiyun if (dev_replace_is_ongoing &&
5546*4882a593Smuzhiyun fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5547*4882a593Smuzhiyun BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5548*4882a593Smuzhiyun srcdev = fs_info->dev_replace.srcdev;
5549*4882a593Smuzhiyun else
5550*4882a593Smuzhiyun srcdev = NULL;
5551*4882a593Smuzhiyun
5552*4882a593Smuzhiyun /*
5553*4882a593Smuzhiyun * Try to avoid the drive that is the source drive for a
5554*4882a593Smuzhiyun * dev-replace procedure; only choose it if no other non-missing
5555*4882a593Smuzhiyun * mirror is available.
5556*4882a593Smuzhiyun */
5557*4882a593Smuzhiyun for (tolerance = 0; tolerance < 2; tolerance++) {
5558*4882a593Smuzhiyun if (map->stripes[preferred_mirror].dev->bdev &&
5559*4882a593Smuzhiyun (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5560*4882a593Smuzhiyun return preferred_mirror;
5561*4882a593Smuzhiyun for (i = first; i < first + num_stripes; i++) {
5562*4882a593Smuzhiyun if (map->stripes[i].dev->bdev &&
5563*4882a593Smuzhiyun (tolerance || map->stripes[i].dev != srcdev))
5564*4882a593Smuzhiyun return i;
5565*4882a593Smuzhiyun }
5566*4882a593Smuzhiyun }
5567*4882a593Smuzhiyun
5568*4882a593Smuzhiyun /* We couldn't find one that doesn't fail. Just return something
5569*4882a593Smuzhiyun * and the I/O error handling code will clean up eventually.
5570*4882a593Smuzhiyun */
5571*4882a593Smuzhiyun return preferred_mirror;
5572*4882a593Smuzhiyun }
5573*4882a593Smuzhiyun
5574*4882a593Smuzhiyun /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5575*4882a593Smuzhiyun static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5576*4882a593Smuzhiyun {
5577*4882a593Smuzhiyun int i;
5578*4882a593Smuzhiyun int again = 1;
5579*4882a593Smuzhiyun
5580*4882a593Smuzhiyun while (again) {
5581*4882a593Smuzhiyun again = 0;
5582*4882a593Smuzhiyun for (i = 0; i < num_stripes - 1; i++) {
5583*4882a593Smuzhiyun /* Swap if parity is on a smaller index */
5584*4882a593Smuzhiyun if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5585*4882a593Smuzhiyun swap(bbio->stripes[i], bbio->stripes[i + 1]);
5586*4882a593Smuzhiyun swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5587*4882a593Smuzhiyun again = 1;
5588*4882a593Smuzhiyun }
5589*4882a593Smuzhiyun }
5590*4882a593Smuzhiyun }
5591*4882a593Smuzhiyun }
5592*4882a593Smuzhiyun
5593*4882a593Smuzhiyun static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5594*4882a593Smuzhiyun {
5595*4882a593Smuzhiyun struct btrfs_bio *bbio = kzalloc(
5596*4882a593Smuzhiyun /* the size of the btrfs_bio */
5597*4882a593Smuzhiyun sizeof(struct btrfs_bio) +
5598*4882a593Smuzhiyun /* plus the variable array for the stripes */
5599*4882a593Smuzhiyun sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5600*4882a593Smuzhiyun /* plus the variable array for the tgt dev */
5601*4882a593Smuzhiyun sizeof(int) * (real_stripes) +
5602*4882a593Smuzhiyun /*
5603*4882a593Smuzhiyun * plus the raid_map, which includes both the tgt dev
5604*4882a593Smuzhiyun * and the stripes
5605*4882a593Smuzhiyun */
5606*4882a593Smuzhiyun sizeof(u64) * (total_stripes),
5607*4882a593Smuzhiyun GFP_NOFS|__GFP_NOFAIL);
5608*4882a593Smuzhiyun
5609*4882a593Smuzhiyun atomic_set(&bbio->error, 0);
5610*4882a593Smuzhiyun refcount_set(&bbio->refs, 1);
5611*4882a593Smuzhiyun
5612*4882a593Smuzhiyun bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5613*4882a593Smuzhiyun bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5614*4882a593Smuzhiyun
5615*4882a593Smuzhiyun return bbio;
5616*4882a593Smuzhiyun }
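/*
 * The single allocation above is carved up as follows (illustrative):
 *
 *   [struct btrfs_bio][total_stripes x btrfs_bio_stripe]
 *   [real_stripes x int (tgtdev_map)][total_stripes x u64 (raid_map)]
 *
 * which is why tgtdev_map and raid_map are derived from bbio->stripes with
 * plain pointer arithmetic.
 */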
5617*4882a593Smuzhiyun
5618*4882a593Smuzhiyun void btrfs_get_bbio(struct btrfs_bio *bbio)
5619*4882a593Smuzhiyun {
5620*4882a593Smuzhiyun WARN_ON(!refcount_read(&bbio->refs));
5621*4882a593Smuzhiyun refcount_inc(&bbio->refs);
5622*4882a593Smuzhiyun }
5623*4882a593Smuzhiyun
5624*4882a593Smuzhiyun void btrfs_put_bbio(struct btrfs_bio *bbio)
5625*4882a593Smuzhiyun {
5626*4882a593Smuzhiyun if (!bbio)
5627*4882a593Smuzhiyun return;
5628*4882a593Smuzhiyun if (refcount_dec_and_test(&bbio->refs))
5629*4882a593Smuzhiyun kfree(bbio);
5630*4882a593Smuzhiyun }
5631*4882a593Smuzhiyun
5632*4882a593Smuzhiyun /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5633*4882a593Smuzhiyun /*
5634*4882a593Smuzhiyun * Note that discards won't be sent to the target device of a device
5635*4882a593Smuzhiyun * replace.
5636*4882a593Smuzhiyun */
5637*4882a593Smuzhiyun static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5638*4882a593Smuzhiyun u64 logical, u64 *length_ret,
5639*4882a593Smuzhiyun struct btrfs_bio **bbio_ret)
5640*4882a593Smuzhiyun {
5641*4882a593Smuzhiyun struct extent_map *em;
5642*4882a593Smuzhiyun struct map_lookup *map;
5643*4882a593Smuzhiyun struct btrfs_bio *bbio;
5644*4882a593Smuzhiyun u64 length = *length_ret;
5645*4882a593Smuzhiyun u64 offset;
5646*4882a593Smuzhiyun u64 stripe_nr;
5647*4882a593Smuzhiyun u64 stripe_nr_end;
5648*4882a593Smuzhiyun u64 stripe_end_offset;
5649*4882a593Smuzhiyun u64 stripe_cnt;
5650*4882a593Smuzhiyun u64 stripe_len;
5651*4882a593Smuzhiyun u64 stripe_offset;
5652*4882a593Smuzhiyun u64 num_stripes;
5653*4882a593Smuzhiyun u32 stripe_index;
5654*4882a593Smuzhiyun u32 factor = 0;
5655*4882a593Smuzhiyun u32 sub_stripes = 0;
5656*4882a593Smuzhiyun u64 stripes_per_dev = 0;
5657*4882a593Smuzhiyun u32 remaining_stripes = 0;
5658*4882a593Smuzhiyun u32 last_stripe = 0;
5659*4882a593Smuzhiyun int ret = 0;
5660*4882a593Smuzhiyun int i;
5661*4882a593Smuzhiyun
5662*4882a593Smuzhiyun /* discard always returns a bbio */
5663*4882a593Smuzhiyun ASSERT(bbio_ret);
5664*4882a593Smuzhiyun
5665*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, logical, length);
5666*4882a593Smuzhiyun if (IS_ERR(em))
5667*4882a593Smuzhiyun return PTR_ERR(em);
5668*4882a593Smuzhiyun
5669*4882a593Smuzhiyun map = em->map_lookup;
5670*4882a593Smuzhiyun /* we don't discard raid56 yet */
5671*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5672*4882a593Smuzhiyun ret = -EOPNOTSUPP;
5673*4882a593Smuzhiyun goto out;
5674*4882a593Smuzhiyun }
5675*4882a593Smuzhiyun
5676*4882a593Smuzhiyun offset = logical - em->start;
5677*4882a593Smuzhiyun length = min_t(u64, em->start + em->len - logical, length);
5678*4882a593Smuzhiyun *length_ret = length;
5679*4882a593Smuzhiyun
5680*4882a593Smuzhiyun stripe_len = map->stripe_len;
5681*4882a593Smuzhiyun /*
5682*4882a593Smuzhiyun * stripe_nr counts the total number of stripes we have to stride
5683*4882a593Smuzhiyun * to get to this block
5684*4882a593Smuzhiyun */
5685*4882a593Smuzhiyun stripe_nr = div64_u64(offset, stripe_len);
5686*4882a593Smuzhiyun
5687*4882a593Smuzhiyun /* stripe_offset is the offset of this block in its stripe */
5688*4882a593Smuzhiyun stripe_offset = offset - stripe_nr * stripe_len;
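	/*
	 * For example, with a 64K stripe_len and offset = 200K this yields
	 * stripe_nr = 3 and stripe_offset = 8K.
	 */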
5689*4882a593Smuzhiyun
5690*4882a593Smuzhiyun stripe_nr_end = round_up(offset + length, map->stripe_len);
5691*4882a593Smuzhiyun stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5692*4882a593Smuzhiyun stripe_cnt = stripe_nr_end - stripe_nr;
5693*4882a593Smuzhiyun stripe_end_offset = stripe_nr_end * map->stripe_len -
5694*4882a593Smuzhiyun (offset + length);
5695*4882a593Smuzhiyun /*
5696*4882a593Smuzhiyun * after this, stripe_nr is the number of stripes on this
5697*4882a593Smuzhiyun * device we have to walk to find the data, and stripe_index is
5698*4882a593Smuzhiyun * the number of our device in the stripe array
5699*4882a593Smuzhiyun */
5700*4882a593Smuzhiyun num_stripes = 1;
5701*4882a593Smuzhiyun stripe_index = 0;
5702*4882a593Smuzhiyun if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5703*4882a593Smuzhiyun BTRFS_BLOCK_GROUP_RAID10)) {
5704*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5705*4882a593Smuzhiyun sub_stripes = 1;
5706*4882a593Smuzhiyun else
5707*4882a593Smuzhiyun sub_stripes = map->sub_stripes;
5708*4882a593Smuzhiyun
5709*4882a593Smuzhiyun factor = map->num_stripes / sub_stripes;
5710*4882a593Smuzhiyun num_stripes = min_t(u64, map->num_stripes,
5711*4882a593Smuzhiyun sub_stripes * stripe_cnt);
5712*4882a593Smuzhiyun stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5713*4882a593Smuzhiyun stripe_index *= sub_stripes;
5714*4882a593Smuzhiyun stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5715*4882a593Smuzhiyun &remaining_stripes);
5716*4882a593Smuzhiyun div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5717*4882a593Smuzhiyun last_stripe *= sub_stripes;
5718*4882a593Smuzhiyun } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5719*4882a593Smuzhiyun BTRFS_BLOCK_GROUP_DUP)) {
5720*4882a593Smuzhiyun num_stripes = map->num_stripes;
5721*4882a593Smuzhiyun } else {
5722*4882a593Smuzhiyun stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5723*4882a593Smuzhiyun &stripe_index);
5724*4882a593Smuzhiyun }
5725*4882a593Smuzhiyun
5726*4882a593Smuzhiyun bbio = alloc_btrfs_bio(num_stripes, 0);
5727*4882a593Smuzhiyun if (!bbio) {
5728*4882a593Smuzhiyun ret = -ENOMEM;
5729*4882a593Smuzhiyun goto out;
5730*4882a593Smuzhiyun }
5731*4882a593Smuzhiyun
5732*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
5733*4882a593Smuzhiyun bbio->stripes[i].physical =
5734*4882a593Smuzhiyun map->stripes[stripe_index].physical +
5735*4882a593Smuzhiyun stripe_offset + stripe_nr * map->stripe_len;
5736*4882a593Smuzhiyun bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5737*4882a593Smuzhiyun
5738*4882a593Smuzhiyun if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5739*4882a593Smuzhiyun BTRFS_BLOCK_GROUP_RAID10)) {
5740*4882a593Smuzhiyun bbio->stripes[i].length = stripes_per_dev *
5741*4882a593Smuzhiyun map->stripe_len;
5742*4882a593Smuzhiyun
5743*4882a593Smuzhiyun if (i / sub_stripes < remaining_stripes)
5744*4882a593Smuzhiyun bbio->stripes[i].length +=
5745*4882a593Smuzhiyun map->stripe_len;
5746*4882a593Smuzhiyun
5747*4882a593Smuzhiyun /*
5748*4882a593Smuzhiyun * Special for the first stripe and
5749*4882a593Smuzhiyun * the last stripe:
5750*4882a593Smuzhiyun *
5751*4882a593Smuzhiyun * |-------|...|-------|
5752*4882a593Smuzhiyun * |----------|
5753*4882a593Smuzhiyun * off end_off
5754*4882a593Smuzhiyun */
5755*4882a593Smuzhiyun if (i < sub_stripes)
5756*4882a593Smuzhiyun bbio->stripes[i].length -=
5757*4882a593Smuzhiyun stripe_offset;
5758*4882a593Smuzhiyun
5759*4882a593Smuzhiyun if (stripe_index >= last_stripe &&
5760*4882a593Smuzhiyun stripe_index <= (last_stripe +
5761*4882a593Smuzhiyun sub_stripes - 1))
5762*4882a593Smuzhiyun bbio->stripes[i].length -=
5763*4882a593Smuzhiyun stripe_end_offset;
5764*4882a593Smuzhiyun
5765*4882a593Smuzhiyun if (i == sub_stripes - 1)
5766*4882a593Smuzhiyun stripe_offset = 0;
5767*4882a593Smuzhiyun } else {
5768*4882a593Smuzhiyun bbio->stripes[i].length = length;
5769*4882a593Smuzhiyun }
5770*4882a593Smuzhiyun
5771*4882a593Smuzhiyun stripe_index++;
5772*4882a593Smuzhiyun if (stripe_index == map->num_stripes) {
5773*4882a593Smuzhiyun stripe_index = 0;
5774*4882a593Smuzhiyun stripe_nr++;
5775*4882a593Smuzhiyun }
5776*4882a593Smuzhiyun }
5777*4882a593Smuzhiyun
5778*4882a593Smuzhiyun *bbio_ret = bbio;
5779*4882a593Smuzhiyun bbio->map_type = map->type;
5780*4882a593Smuzhiyun bbio->num_stripes = num_stripes;
5781*4882a593Smuzhiyun out:
5782*4882a593Smuzhiyun free_extent_map(em);
5783*4882a593Smuzhiyun return ret;
5784*4882a593Smuzhiyun }
5785*4882a593Smuzhiyun
5786*4882a593Smuzhiyun /*
5787*4882a593Smuzhiyun * In dev-replace case, for repair case (that's the only case where the mirror
5788*4882a593Smuzhiyun * is selected explicitly when calling btrfs_map_block), blocks left of the
5789*4882a593Smuzhiyun * left cursor can also be read from the target drive.
5790*4882a593Smuzhiyun *
5791*4882a593Smuzhiyun * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5792*4882a593Smuzhiyun * array of stripes.
5793*4882a593Smuzhiyun * For READ, it also needs to be supported using the same mirror number.
5794*4882a593Smuzhiyun *
5795*4882a593Smuzhiyun * If the requested block is not left of the left cursor, EIO is returned. This
5796*4882a593Smuzhiyun * can happen because btrfs_num_copies() returns one more in the dev-replace
5797*4882a593Smuzhiyun * case.
5798*4882a593Smuzhiyun */
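/*
 * Hypothetical example: a RAID1 block has two stripes, but during a replace
 * btrfs_num_copies() reports three, so a repair read may ask for mirror 3
 * (num_stripes + 1).  If the BTRFS_MAP_GET_READ_MIRRORS mapping below comes
 * back with only two stripes, the block cannot be served from the target and
 * -EIO is returned; otherwise the source stripe's mirror number and physical
 * offset are handed back so the caller can redirect that single read to the
 * target device.
 */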
5799*4882a593Smuzhiyun static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5800*4882a593Smuzhiyun u64 logical, u64 length,
5801*4882a593Smuzhiyun u64 srcdev_devid, int *mirror_num,
5802*4882a593Smuzhiyun u64 *physical)
5803*4882a593Smuzhiyun {
5804*4882a593Smuzhiyun struct btrfs_bio *bbio = NULL;
5805*4882a593Smuzhiyun int num_stripes;
5806*4882a593Smuzhiyun int index_srcdev = 0;
5807*4882a593Smuzhiyun int found = 0;
5808*4882a593Smuzhiyun u64 physical_of_found = 0;
5809*4882a593Smuzhiyun int i;
5810*4882a593Smuzhiyun int ret = 0;
5811*4882a593Smuzhiyun
5812*4882a593Smuzhiyun ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5813*4882a593Smuzhiyun logical, &length, &bbio, 0, 0);
5814*4882a593Smuzhiyun if (ret) {
5815*4882a593Smuzhiyun ASSERT(bbio == NULL);
5816*4882a593Smuzhiyun return ret;
5817*4882a593Smuzhiyun }
5818*4882a593Smuzhiyun
5819*4882a593Smuzhiyun num_stripes = bbio->num_stripes;
5820*4882a593Smuzhiyun if (*mirror_num > num_stripes) {
5821*4882a593Smuzhiyun /*
5822*4882a593Smuzhiyun * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5823*4882a593Smuzhiyun * which means that the requested area is not left of the
5824*4882a593Smuzhiyun * left cursor
5825*4882a593Smuzhiyun */
5826*4882a593Smuzhiyun btrfs_put_bbio(bbio);
5827*4882a593Smuzhiyun return -EIO;
5828*4882a593Smuzhiyun }
5829*4882a593Smuzhiyun
5830*4882a593Smuzhiyun /*
5831*4882a593Smuzhiyun * process the rest of the function using the mirror_num of the source
5832*4882a593Smuzhiyun * drive. Therefore look it up first. At the end, patch the device
5833*4882a593Smuzhiyun * pointer to the one of the target drive.
5834*4882a593Smuzhiyun */
5835*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
5836*4882a593Smuzhiyun if (bbio->stripes[i].dev->devid != srcdev_devid)
5837*4882a593Smuzhiyun continue;
5838*4882a593Smuzhiyun
5839*4882a593Smuzhiyun /*
5840*4882a593Smuzhiyun * In case of DUP, in order to keep it simple, only add the
5841*4882a593Smuzhiyun * mirror with the lowest physical address
5842*4882a593Smuzhiyun */
5843*4882a593Smuzhiyun if (found &&
5844*4882a593Smuzhiyun physical_of_found <= bbio->stripes[i].physical)
5845*4882a593Smuzhiyun continue;
5846*4882a593Smuzhiyun
5847*4882a593Smuzhiyun index_srcdev = i;
5848*4882a593Smuzhiyun found = 1;
5849*4882a593Smuzhiyun physical_of_found = bbio->stripes[i].physical;
5850*4882a593Smuzhiyun }
5851*4882a593Smuzhiyun
5852*4882a593Smuzhiyun btrfs_put_bbio(bbio);
5853*4882a593Smuzhiyun
5854*4882a593Smuzhiyun ASSERT(found);
5855*4882a593Smuzhiyun if (!found)
5856*4882a593Smuzhiyun return -EIO;
5857*4882a593Smuzhiyun
5858*4882a593Smuzhiyun *mirror_num = index_srcdev + 1;
5859*4882a593Smuzhiyun *physical = physical_of_found;
5860*4882a593Smuzhiyun return ret;
5861*4882a593Smuzhiyun }
5862*4882a593Smuzhiyun
5863*4882a593Smuzhiyun static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5864*4882a593Smuzhiyun struct btrfs_bio **bbio_ret,
5865*4882a593Smuzhiyun struct btrfs_dev_replace *dev_replace,
5866*4882a593Smuzhiyun int *num_stripes_ret, int *max_errors_ret)
5867*4882a593Smuzhiyun {
5868*4882a593Smuzhiyun struct btrfs_bio *bbio = *bbio_ret;
5869*4882a593Smuzhiyun u64 srcdev_devid = dev_replace->srcdev->devid;
5870*4882a593Smuzhiyun int tgtdev_indexes = 0;
5871*4882a593Smuzhiyun int num_stripes = *num_stripes_ret;
5872*4882a593Smuzhiyun int max_errors = *max_errors_ret;
5873*4882a593Smuzhiyun int i;
5874*4882a593Smuzhiyun
5875*4882a593Smuzhiyun if (op == BTRFS_MAP_WRITE) {
5876*4882a593Smuzhiyun int index_where_to_add;
5877*4882a593Smuzhiyun
5878*4882a593Smuzhiyun /*
5879*4882a593Smuzhiyun * duplicate the write operations while the dev replace
5880*4882a593Smuzhiyun * procedure is running. Since the copying of the old disk to
5881*4882a593Smuzhiyun * the new disk takes place at run time while the filesystem is
5882*4882a593Smuzhiyun * mounted writable, the regular write operations to the old
5883*4882a593Smuzhiyun * disk have to be duplicated to go to the new disk as well.
5884*4882a593Smuzhiyun *
5885*4882a593Smuzhiyun * Note that device->missing is handled by the caller, and that
5886*4882a593Smuzhiyun * the write to the old disk is already set up in the stripes
5887*4882a593Smuzhiyun * array.
5888*4882a593Smuzhiyun */
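/*
 * Illustrative example (DUP profile): a write mapped to 2 stripes on the
 * source device grows to 4 stripes here.  tgtdev_map[0] and tgtdev_map[1]
 * point at the two appended target copies, and max_errors is bumped once
 * per copy because these extra writes are best-effort and may fail without
 * reducing the redundancy of the original stripes.
 */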
5889*4882a593Smuzhiyun index_where_to_add = num_stripes;
5890*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
5891*4882a593Smuzhiyun if (bbio->stripes[i].dev->devid == srcdev_devid) {
5892*4882a593Smuzhiyun /* write to new disk, too */
5893*4882a593Smuzhiyun struct btrfs_bio_stripe *new =
5894*4882a593Smuzhiyun bbio->stripes + index_where_to_add;
5895*4882a593Smuzhiyun struct btrfs_bio_stripe *old =
5896*4882a593Smuzhiyun bbio->stripes + i;
5897*4882a593Smuzhiyun
5898*4882a593Smuzhiyun new->physical = old->physical;
5899*4882a593Smuzhiyun new->length = old->length;
5900*4882a593Smuzhiyun new->dev = dev_replace->tgtdev;
5901*4882a593Smuzhiyun bbio->tgtdev_map[i] = index_where_to_add;
5902*4882a593Smuzhiyun index_where_to_add++;
5903*4882a593Smuzhiyun max_errors++;
5904*4882a593Smuzhiyun tgtdev_indexes++;
5905*4882a593Smuzhiyun }
5906*4882a593Smuzhiyun }
5907*4882a593Smuzhiyun num_stripes = index_where_to_add;
5908*4882a593Smuzhiyun } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5909*4882a593Smuzhiyun int index_srcdev = 0;
5910*4882a593Smuzhiyun int found = 0;
5911*4882a593Smuzhiyun u64 physical_of_found = 0;
5912*4882a593Smuzhiyun
5913*4882a593Smuzhiyun /*
5914*4882a593Smuzhiyun * During the dev-replace procedure, the target drive can also
5915*4882a593Smuzhiyun * be used to read data in case it is needed to repair a corrupt
5916*4882a593Smuzhiyun * block elsewhere. This is possible if the requested area is
5917*4882a593Smuzhiyun * left of the left cursor. In this area, the target drive is a
5918*4882a593Smuzhiyun * full copy of the source drive.
5919*4882a593Smuzhiyun */
5920*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
5921*4882a593Smuzhiyun if (bbio->stripes[i].dev->devid == srcdev_devid) {
5922*4882a593Smuzhiyun /*
5923*4882a593Smuzhiyun * In case of DUP, in order to keep it simple,
5924*4882a593Smuzhiyun * only add the mirror with the lowest physical
5925*4882a593Smuzhiyun * address
5926*4882a593Smuzhiyun */
5927*4882a593Smuzhiyun if (found &&
5928*4882a593Smuzhiyun physical_of_found <=
5929*4882a593Smuzhiyun bbio->stripes[i].physical)
5930*4882a593Smuzhiyun continue;
5931*4882a593Smuzhiyun index_srcdev = i;
5932*4882a593Smuzhiyun found = 1;
5933*4882a593Smuzhiyun physical_of_found = bbio->stripes[i].physical;
5934*4882a593Smuzhiyun }
5935*4882a593Smuzhiyun }
5936*4882a593Smuzhiyun if (found) {
5937*4882a593Smuzhiyun struct btrfs_bio_stripe *tgtdev_stripe =
5938*4882a593Smuzhiyun bbio->stripes + num_stripes;
5939*4882a593Smuzhiyun
5940*4882a593Smuzhiyun tgtdev_stripe->physical = physical_of_found;
5941*4882a593Smuzhiyun tgtdev_stripe->length =
5942*4882a593Smuzhiyun bbio->stripes[index_srcdev].length;
5943*4882a593Smuzhiyun tgtdev_stripe->dev = dev_replace->tgtdev;
5944*4882a593Smuzhiyun bbio->tgtdev_map[index_srcdev] = num_stripes;
5945*4882a593Smuzhiyun
5946*4882a593Smuzhiyun tgtdev_indexes++;
5947*4882a593Smuzhiyun num_stripes++;
5948*4882a593Smuzhiyun }
5949*4882a593Smuzhiyun }
5950*4882a593Smuzhiyun
5951*4882a593Smuzhiyun *num_stripes_ret = num_stripes;
5952*4882a593Smuzhiyun *max_errors_ret = max_errors;
5953*4882a593Smuzhiyun bbio->num_tgtdevs = tgtdev_indexes;
5954*4882a593Smuzhiyun *bbio_ret = bbio;
5955*4882a593Smuzhiyun }
5956*4882a593Smuzhiyun
5957*4882a593Smuzhiyun static bool need_full_stripe(enum btrfs_map_op op)
5958*4882a593Smuzhiyun {
5959*4882a593Smuzhiyun return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5960*4882a593Smuzhiyun }
5961*4882a593Smuzhiyun
5962*4882a593Smuzhiyun /*
5963*4882a593Smuzhiyun * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5964*4882a593Smuzhiyun * tuple. This information is used to calculate how big a
5965*4882a593Smuzhiyun * particular bio can get before it straddles a stripe.
5966*4882a593Smuzhiyun *
5967*4882a593Smuzhiyun * @fs_info - the filesystem
5968*4882a593Smuzhiyun * @logical - address that we want to figure out the geometry of
5969*4882a593Smuzhiyun * @len - the length of IO we are going to perform, starting at @logical
5970*4882a593Smuzhiyun * @op - type of operation - write or read
5971*4882a593Smuzhiyun * @io_geom - pointer used to return values
5972*4882a593Smuzhiyun *
5973*4882a593Smuzhiyun * Returns < 0 in case a chunk for the given logical address cannot be found,
5974*4882a593Smuzhiyun * which usually shouldn't happen unless @logical is corrupted, and 0 otherwise.
5975*4882a593Smuzhiyun */
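/*
 * Illustrative caller sketch (not taken from this file): clamp an IO so that
 * it does not cross a stripe boundary before submitting it.
 *
 *	struct btrfs_io_geometry geom;
 *	int err = btrfs_get_io_geometry(fs_info, BTRFS_MAP_WRITE, logical,
 *					length, &geom);
 *	if (err < 0)
 *		return err;
 *	length = min(length, geom.len);
 */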
5976*4882a593Smuzhiyun int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5977*4882a593Smuzhiyun u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5978*4882a593Smuzhiyun {
5979*4882a593Smuzhiyun struct extent_map *em;
5980*4882a593Smuzhiyun struct map_lookup *map;
5981*4882a593Smuzhiyun u64 offset;
5982*4882a593Smuzhiyun u64 stripe_offset;
5983*4882a593Smuzhiyun u64 stripe_nr;
5984*4882a593Smuzhiyun u64 stripe_len;
5985*4882a593Smuzhiyun u64 raid56_full_stripe_start = (u64)-1;
5986*4882a593Smuzhiyun int data_stripes;
5987*4882a593Smuzhiyun int ret = 0;
5988*4882a593Smuzhiyun
5989*4882a593Smuzhiyun ASSERT(op != BTRFS_MAP_DISCARD);
5990*4882a593Smuzhiyun
5991*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, logical, len);
5992*4882a593Smuzhiyun if (IS_ERR(em))
5993*4882a593Smuzhiyun return PTR_ERR(em);
5994*4882a593Smuzhiyun
5995*4882a593Smuzhiyun map = em->map_lookup;
5996*4882a593Smuzhiyun /* Offset of this logical address in the chunk */
5997*4882a593Smuzhiyun offset = logical - em->start;
5998*4882a593Smuzhiyun /* Len of a stripe in a chunk */
5999*4882a593Smuzhiyun stripe_len = map->stripe_len;
6000*4882a593Smuzhiyun /* Stripe this block falls in */
6001*4882a593Smuzhiyun stripe_nr = div64_u64(offset, stripe_len);
6002*4882a593Smuzhiyun /* Offset of stripe in the chunk */
6003*4882a593Smuzhiyun stripe_offset = stripe_nr * stripe_len;
6004*4882a593Smuzhiyun if (offset < stripe_offset) {
6005*4882a593Smuzhiyun btrfs_crit(fs_info,
6006*4882a593Smuzhiyun "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6007*4882a593Smuzhiyun stripe_offset, offset, em->start, logical, stripe_len);
6008*4882a593Smuzhiyun ret = -EINVAL;
6009*4882a593Smuzhiyun goto out;
6010*4882a593Smuzhiyun }
6011*4882a593Smuzhiyun
6012*4882a593Smuzhiyun /* stripe_offset is the offset of this block in its stripe */
6013*4882a593Smuzhiyun stripe_offset = offset - stripe_offset;
6014*4882a593Smuzhiyun data_stripes = nr_data_stripes(map);
6015*4882a593Smuzhiyun
6016*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6017*4882a593Smuzhiyun u64 max_len = stripe_len - stripe_offset;
6018*4882a593Smuzhiyun
6019*4882a593Smuzhiyun /*
6020*4882a593Smuzhiyun * In case of raid56, we need to know the stripe aligned start
6021*4882a593Smuzhiyun */
6022*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6023*4882a593Smuzhiyun unsigned long full_stripe_len = stripe_len * data_stripes;
6024*4882a593Smuzhiyun raid56_full_stripe_start = offset;
6025*4882a593Smuzhiyun
6026*4882a593Smuzhiyun /*
6027*4882a593Smuzhiyun * Allow a write of a full stripe, but make sure we
6028*4882a593Smuzhiyun * don't allow straddling of stripes
6029*4882a593Smuzhiyun */
6030*4882a593Smuzhiyun raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6031*4882a593Smuzhiyun full_stripe_len);
6032*4882a593Smuzhiyun raid56_full_stripe_start *= full_stripe_len;
6033*4882a593Smuzhiyun
6034*4882a593Smuzhiyun /*
6035*4882a593Smuzhiyun * For writes to RAID[56], allow a full stripeset across
6036*4882a593Smuzhiyun * all disks. For other RAID types and for RAID[56]
6037*4882a593Smuzhiyun * reads, just allow a single stripe (on a single disk).
6038*4882a593Smuzhiyun */
6039*4882a593Smuzhiyun if (op == BTRFS_MAP_WRITE) {
6040*4882a593Smuzhiyun max_len = stripe_len * data_stripes -
6041*4882a593Smuzhiyun (offset - raid56_full_stripe_start);
6042*4882a593Smuzhiyun }
6043*4882a593Smuzhiyun }
6044*4882a593Smuzhiyun len = min_t(u64, em->len - offset, max_len);
6045*4882a593Smuzhiyun } else {
6046*4882a593Smuzhiyun len = em->len - offset;
6047*4882a593Smuzhiyun }
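/*
 * Worked example with made-up numbers: RAID5 over 3 devices means
 * data_stripes = 2, and stripe_len = 64K gives a 128K full stripe.  For
 * offset = 160K the full stripe starts at 128K, so a write may span up to
 * 96K (to the end of the full stripe) while a read stays capped at 32K
 * (to the end of the single 64K stripe it starts in).
 */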
6048*4882a593Smuzhiyun
6049*4882a593Smuzhiyun io_geom->len = len;
6050*4882a593Smuzhiyun io_geom->offset = offset;
6051*4882a593Smuzhiyun io_geom->stripe_len = stripe_len;
6052*4882a593Smuzhiyun io_geom->stripe_nr = stripe_nr;
6053*4882a593Smuzhiyun io_geom->stripe_offset = stripe_offset;
6054*4882a593Smuzhiyun io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6055*4882a593Smuzhiyun
6056*4882a593Smuzhiyun out:
6057*4882a593Smuzhiyun /* once for us */
6058*4882a593Smuzhiyun free_extent_map(em);
6059*4882a593Smuzhiyun return ret;
6060*4882a593Smuzhiyun }
6061*4882a593Smuzhiyun
6062*4882a593Smuzhiyun static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6063*4882a593Smuzhiyun enum btrfs_map_op op,
6064*4882a593Smuzhiyun u64 logical, u64 *length,
6065*4882a593Smuzhiyun struct btrfs_bio **bbio_ret,
6066*4882a593Smuzhiyun int mirror_num, int need_raid_map)
6067*4882a593Smuzhiyun {
6068*4882a593Smuzhiyun struct extent_map *em;
6069*4882a593Smuzhiyun struct map_lookup *map;
6070*4882a593Smuzhiyun u64 stripe_offset;
6071*4882a593Smuzhiyun u64 stripe_nr;
6072*4882a593Smuzhiyun u64 stripe_len;
6073*4882a593Smuzhiyun u32 stripe_index;
6074*4882a593Smuzhiyun int data_stripes;
6075*4882a593Smuzhiyun int i;
6076*4882a593Smuzhiyun int ret = 0;
6077*4882a593Smuzhiyun int num_stripes;
6078*4882a593Smuzhiyun int max_errors = 0;
6079*4882a593Smuzhiyun int tgtdev_indexes = 0;
6080*4882a593Smuzhiyun struct btrfs_bio *bbio = NULL;
6081*4882a593Smuzhiyun struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6082*4882a593Smuzhiyun int dev_replace_is_ongoing = 0;
6083*4882a593Smuzhiyun int num_alloc_stripes;
6084*4882a593Smuzhiyun int patch_the_first_stripe_for_dev_replace = 0;
6085*4882a593Smuzhiyun u64 physical_to_patch_in_first_stripe = 0;
6086*4882a593Smuzhiyun u64 raid56_full_stripe_start = (u64)-1;
6087*4882a593Smuzhiyun struct btrfs_io_geometry geom;
6088*4882a593Smuzhiyun
6089*4882a593Smuzhiyun ASSERT(bbio_ret);
6090*4882a593Smuzhiyun ASSERT(op != BTRFS_MAP_DISCARD);
6091*4882a593Smuzhiyun
6092*4882a593Smuzhiyun ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6093*4882a593Smuzhiyun if (ret < 0)
6094*4882a593Smuzhiyun return ret;
6095*4882a593Smuzhiyun
6096*4882a593Smuzhiyun em = btrfs_get_chunk_map(fs_info, logical, *length);
6097*4882a593Smuzhiyun ASSERT(!IS_ERR(em));
6098*4882a593Smuzhiyun map = em->map_lookup;
6099*4882a593Smuzhiyun
6100*4882a593Smuzhiyun *length = geom.len;
6101*4882a593Smuzhiyun stripe_len = geom.stripe_len;
6102*4882a593Smuzhiyun stripe_nr = geom.stripe_nr;
6103*4882a593Smuzhiyun stripe_offset = geom.stripe_offset;
6104*4882a593Smuzhiyun raid56_full_stripe_start = geom.raid56_stripe_offset;
6105*4882a593Smuzhiyun data_stripes = nr_data_stripes(map);
6106*4882a593Smuzhiyun
6107*4882a593Smuzhiyun down_read(&dev_replace->rwsem);
6108*4882a593Smuzhiyun dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6109*4882a593Smuzhiyun /*
6110*4882a593Smuzhiyun * Hold the semaphore for read during the whole operation, write is
6111*4882a593Smuzhiyun * requested at commit time but must wait.
6112*4882a593Smuzhiyun */
6113*4882a593Smuzhiyun if (!dev_replace_is_ongoing)
6114*4882a593Smuzhiyun up_read(&dev_replace->rwsem);
6115*4882a593Smuzhiyun
6116*4882a593Smuzhiyun if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6117*4882a593Smuzhiyun !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6118*4882a593Smuzhiyun ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6119*4882a593Smuzhiyun dev_replace->srcdev->devid,
6120*4882a593Smuzhiyun &mirror_num,
6121*4882a593Smuzhiyun &physical_to_patch_in_first_stripe);
6122*4882a593Smuzhiyun if (ret)
6123*4882a593Smuzhiyun goto out;
6124*4882a593Smuzhiyun else
6125*4882a593Smuzhiyun patch_the_first_stripe_for_dev_replace = 1;
6126*4882a593Smuzhiyun } else if (mirror_num > map->num_stripes) {
6127*4882a593Smuzhiyun mirror_num = 0;
6128*4882a593Smuzhiyun }
6129*4882a593Smuzhiyun
6130*4882a593Smuzhiyun num_stripes = 1;
6131*4882a593Smuzhiyun stripe_index = 0;
6132*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6133*4882a593Smuzhiyun stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6134*4882a593Smuzhiyun &stripe_index);
6135*4882a593Smuzhiyun if (!need_full_stripe(op))
6136*4882a593Smuzhiyun mirror_num = 1;
6137*4882a593Smuzhiyun } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6138*4882a593Smuzhiyun if (need_full_stripe(op))
6139*4882a593Smuzhiyun num_stripes = map->num_stripes;
6140*4882a593Smuzhiyun else if (mirror_num)
6141*4882a593Smuzhiyun stripe_index = mirror_num - 1;
6142*4882a593Smuzhiyun else {
6143*4882a593Smuzhiyun stripe_index = find_live_mirror(fs_info, map, 0,
6144*4882a593Smuzhiyun dev_replace_is_ongoing);
6145*4882a593Smuzhiyun mirror_num = stripe_index + 1;
6146*4882a593Smuzhiyun }
6147*4882a593Smuzhiyun
6148*4882a593Smuzhiyun } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6149*4882a593Smuzhiyun if (need_full_stripe(op)) {
6150*4882a593Smuzhiyun num_stripes = map->num_stripes;
6151*4882a593Smuzhiyun } else if (mirror_num) {
6152*4882a593Smuzhiyun stripe_index = mirror_num - 1;
6153*4882a593Smuzhiyun } else {
6154*4882a593Smuzhiyun mirror_num = 1;
6155*4882a593Smuzhiyun }
6156*4882a593Smuzhiyun
6157*4882a593Smuzhiyun } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6158*4882a593Smuzhiyun u32 factor = map->num_stripes / map->sub_stripes;
6159*4882a593Smuzhiyun
6160*4882a593Smuzhiyun stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6161*4882a593Smuzhiyun stripe_index *= map->sub_stripes;
6162*4882a593Smuzhiyun
6163*4882a593Smuzhiyun if (need_full_stripe(op))
6164*4882a593Smuzhiyun num_stripes = map->sub_stripes;
6165*4882a593Smuzhiyun else if (mirror_num)
6166*4882a593Smuzhiyun stripe_index += mirror_num - 1;
6167*4882a593Smuzhiyun else {
6168*4882a593Smuzhiyun int old_stripe_index = stripe_index;
6169*4882a593Smuzhiyun stripe_index = find_live_mirror(fs_info, map,
6170*4882a593Smuzhiyun stripe_index,
6171*4882a593Smuzhiyun dev_replace_is_ongoing);
6172*4882a593Smuzhiyun mirror_num = stripe_index - old_stripe_index + 1;
6173*4882a593Smuzhiyun }
6174*4882a593Smuzhiyun
6175*4882a593Smuzhiyun } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6176*4882a593Smuzhiyun if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6177*4882a593Smuzhiyun /* push stripe_nr back to the start of the full stripe */
6178*4882a593Smuzhiyun stripe_nr = div64_u64(raid56_full_stripe_start,
6179*4882a593Smuzhiyun stripe_len * data_stripes);
6180*4882a593Smuzhiyun
6181*4882a593Smuzhiyun /* RAID[56] write or recovery. Return all stripes */
6182*4882a593Smuzhiyun num_stripes = map->num_stripes;
6183*4882a593Smuzhiyun max_errors = nr_parity_stripes(map);
6184*4882a593Smuzhiyun
6185*4882a593Smuzhiyun *length = map->stripe_len;
6186*4882a593Smuzhiyun stripe_index = 0;
6187*4882a593Smuzhiyun stripe_offset = 0;
6188*4882a593Smuzhiyun } else {
6189*4882a593Smuzhiyun /*
6190*4882a593Smuzhiyun * Mirror #0 or #1 means the original data block.
6191*4882a593Smuzhiyun * Mirror #2 is RAID5 parity block.
6192*4882a593Smuzhiyun * Mirror #3 is RAID6 Q block.
6193*4882a593Smuzhiyun */
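/*
 * E.g. on RAID6 with 4 devices (data_stripes = 2), a recovery read
 * with mirror_num == 3 selects stripe_index = 2 + 3 - 2 = 3, the Q
 * stripe, before the parity rotation below is applied.
 */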
6194*4882a593Smuzhiyun stripe_nr = div_u64_rem(stripe_nr,
6195*4882a593Smuzhiyun data_stripes, &stripe_index);
6196*4882a593Smuzhiyun if (mirror_num > 1)
6197*4882a593Smuzhiyun stripe_index = data_stripes + mirror_num - 2;
6198*4882a593Smuzhiyun
6199*4882a593Smuzhiyun /* We distribute the parity blocks across stripes */
6200*4882a593Smuzhiyun div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6201*4882a593Smuzhiyun &stripe_index);
6202*4882a593Smuzhiyun if (!need_full_stripe(op) && mirror_num <= 1)
6203*4882a593Smuzhiyun mirror_num = 1;
6204*4882a593Smuzhiyun }
6205*4882a593Smuzhiyun } else {
6206*4882a593Smuzhiyun /*
6207*4882a593Smuzhiyun * after this, stripe_nr is the number of stripes on this
6208*4882a593Smuzhiyun * device we have to walk to find the data, and stripe_index is
6209*4882a593Smuzhiyun * the number of our device in the stripe array
6210*4882a593Smuzhiyun */
6211*4882a593Smuzhiyun stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6212*4882a593Smuzhiyun &stripe_index);
6213*4882a593Smuzhiyun mirror_num = stripe_index + 1;
6214*4882a593Smuzhiyun }
6215*4882a593Smuzhiyun if (stripe_index >= map->num_stripes) {
6216*4882a593Smuzhiyun btrfs_crit(fs_info,
6217*4882a593Smuzhiyun "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6218*4882a593Smuzhiyun stripe_index, map->num_stripes);
6219*4882a593Smuzhiyun ret = -EINVAL;
6220*4882a593Smuzhiyun goto out;
6221*4882a593Smuzhiyun }
6222*4882a593Smuzhiyun
6223*4882a593Smuzhiyun num_alloc_stripes = num_stripes;
6224*4882a593Smuzhiyun if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6225*4882a593Smuzhiyun if (op == BTRFS_MAP_WRITE)
6226*4882a593Smuzhiyun num_alloc_stripes <<= 1;
6227*4882a593Smuzhiyun if (op == BTRFS_MAP_GET_READ_MIRRORS)
6228*4882a593Smuzhiyun num_alloc_stripes++;
6229*4882a593Smuzhiyun tgtdev_indexes = num_stripes;
6230*4882a593Smuzhiyun }
6231*4882a593Smuzhiyun
6232*4882a593Smuzhiyun bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6233*4882a593Smuzhiyun if (!bbio) {
6234*4882a593Smuzhiyun ret = -ENOMEM;
6235*4882a593Smuzhiyun goto out;
6236*4882a593Smuzhiyun }
6237*4882a593Smuzhiyun
6238*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
6239*4882a593Smuzhiyun bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6240*4882a593Smuzhiyun stripe_offset + stripe_nr * map->stripe_len;
6241*4882a593Smuzhiyun bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6242*4882a593Smuzhiyun stripe_index++;
6243*4882a593Smuzhiyun }
6244*4882a593Smuzhiyun
6245*4882a593Smuzhiyun /* build raid_map */
6246*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6247*4882a593Smuzhiyun (need_full_stripe(op) || mirror_num > 1)) {
6248*4882a593Smuzhiyun u64 tmp;
6249*4882a593Smuzhiyun unsigned rot;
6250*4882a593Smuzhiyun
6251*4882a593Smuzhiyun /* Work out the disk rotation on this stripe-set */
6252*4882a593Smuzhiyun div_u64_rem(stripe_nr, num_stripes, &rot);
6253*4882a593Smuzhiyun
6254*4882a593Smuzhiyun /* Fill in the logical address of each stripe */
6255*4882a593Smuzhiyun tmp = stripe_nr * data_stripes;
6256*4882a593Smuzhiyun for (i = 0; i < data_stripes; i++)
6257*4882a593Smuzhiyun bbio->raid_map[(i+rot) % num_stripes] =
6258*4882a593Smuzhiyun em->start + (tmp + i) * map->stripe_len;
6259*4882a593Smuzhiyun
6260*4882a593Smuzhiyun bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6261*4882a593Smuzhiyun if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6262*4882a593Smuzhiyun bbio->raid_map[(i+rot+1) % num_stripes] =
6263*4882a593Smuzhiyun RAID6_Q_STRIPE;
6264*4882a593Smuzhiyun
6265*4882a593Smuzhiyun sort_parity_stripes(bbio, num_stripes);
6266*4882a593Smuzhiyun }
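/*
 * For the rotation above, e.g. RAID5 over 3 devices (data_stripes = 2)
 * and full stripe number 1: rot = 1, so raid_map[1] and raid_map[2]
 * receive the two data stripe logical addresses and raid_map[0] is
 * marked RAID5_P_STRIPE; the parity slot moves by one for every full
 * stripe.
 */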
6267*4882a593Smuzhiyun
6268*4882a593Smuzhiyun if (need_full_stripe(op))
6269*4882a593Smuzhiyun max_errors = btrfs_chunk_max_errors(map);
6270*4882a593Smuzhiyun
6271*4882a593Smuzhiyun if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6272*4882a593Smuzhiyun need_full_stripe(op)) {
6273*4882a593Smuzhiyun handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6274*4882a593Smuzhiyun &max_errors);
6275*4882a593Smuzhiyun }
6276*4882a593Smuzhiyun
6277*4882a593Smuzhiyun *bbio_ret = bbio;
6278*4882a593Smuzhiyun bbio->map_type = map->type;
6279*4882a593Smuzhiyun bbio->num_stripes = num_stripes;
6280*4882a593Smuzhiyun bbio->max_errors = max_errors;
6281*4882a593Smuzhiyun bbio->mirror_num = mirror_num;
6282*4882a593Smuzhiyun
6283*4882a593Smuzhiyun /*
6284*4882a593Smuzhiyun * This is the READ case where dev_replace_is_ongoing,
6285*4882a593Smuzhiyun * mirror_num == num_stripes + 1 and the dev_replace target drive
6286*4882a593Smuzhiyun * is available as a mirror.
6287*4882a593Smuzhiyun */
6288*4882a593Smuzhiyun if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6289*4882a593Smuzhiyun WARN_ON(num_stripes > 1);
6290*4882a593Smuzhiyun bbio->stripes[0].dev = dev_replace->tgtdev;
6291*4882a593Smuzhiyun bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6292*4882a593Smuzhiyun bbio->mirror_num = map->num_stripes + 1;
6293*4882a593Smuzhiyun }
6294*4882a593Smuzhiyun out:
6295*4882a593Smuzhiyun if (dev_replace_is_ongoing) {
6296*4882a593Smuzhiyun lockdep_assert_held(&dev_replace->rwsem);
6297*4882a593Smuzhiyun /* Unlock and let waiting writers proceed */
6298*4882a593Smuzhiyun up_read(&dev_replace->rwsem);
6299*4882a593Smuzhiyun }
6300*4882a593Smuzhiyun free_extent_map(em);
6301*4882a593Smuzhiyun return ret;
6302*4882a593Smuzhiyun }
6303*4882a593Smuzhiyun
6304*4882a593Smuzhiyun int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6305*4882a593Smuzhiyun u64 logical, u64 *length,
6306*4882a593Smuzhiyun struct btrfs_bio **bbio_ret, int mirror_num)
6307*4882a593Smuzhiyun {
6308*4882a593Smuzhiyun if (op == BTRFS_MAP_DISCARD)
6309*4882a593Smuzhiyun return __btrfs_map_block_for_discard(fs_info, logical,
6310*4882a593Smuzhiyun length, bbio_ret);
6311*4882a593Smuzhiyun
6312*4882a593Smuzhiyun return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6313*4882a593Smuzhiyun mirror_num, 0);
6314*4882a593Smuzhiyun }
6315*4882a593Smuzhiyun
6316*4882a593Smuzhiyun /* For Scrub/replace */
6317*4882a593Smuzhiyun int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6318*4882a593Smuzhiyun u64 logical, u64 *length,
6319*4882a593Smuzhiyun struct btrfs_bio **bbio_ret)
6320*4882a593Smuzhiyun {
6321*4882a593Smuzhiyun return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6322*4882a593Smuzhiyun }
6323*4882a593Smuzhiyun
6324*4882a593Smuzhiyun static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6325*4882a593Smuzhiyun {
6326*4882a593Smuzhiyun bio->bi_private = bbio->private;
6327*4882a593Smuzhiyun bio->bi_end_io = bbio->end_io;
6328*4882a593Smuzhiyun bio_endio(bio);
6329*4882a593Smuzhiyun
6330*4882a593Smuzhiyun btrfs_put_bbio(bbio);
6331*4882a593Smuzhiyun }
6332*4882a593Smuzhiyun
6333*4882a593Smuzhiyun static void btrfs_end_bio(struct bio *bio)
6334*4882a593Smuzhiyun {
6335*4882a593Smuzhiyun struct btrfs_bio *bbio = bio->bi_private;
6336*4882a593Smuzhiyun int is_orig_bio = 0;
6337*4882a593Smuzhiyun
6338*4882a593Smuzhiyun if (bio->bi_status) {
6339*4882a593Smuzhiyun atomic_inc(&bbio->error);
6340*4882a593Smuzhiyun if (bio->bi_status == BLK_STS_IOERR ||
6341*4882a593Smuzhiyun bio->bi_status == BLK_STS_TARGET) {
6342*4882a593Smuzhiyun struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6343*4882a593Smuzhiyun
6344*4882a593Smuzhiyun ASSERT(dev->bdev);
6345*4882a593Smuzhiyun if (bio_op(bio) == REQ_OP_WRITE)
6346*4882a593Smuzhiyun btrfs_dev_stat_inc_and_print(dev,
6347*4882a593Smuzhiyun BTRFS_DEV_STAT_WRITE_ERRS);
6348*4882a593Smuzhiyun else if (!(bio->bi_opf & REQ_RAHEAD))
6349*4882a593Smuzhiyun btrfs_dev_stat_inc_and_print(dev,
6350*4882a593Smuzhiyun BTRFS_DEV_STAT_READ_ERRS);
6351*4882a593Smuzhiyun if (bio->bi_opf & REQ_PREFLUSH)
6352*4882a593Smuzhiyun btrfs_dev_stat_inc_and_print(dev,
6353*4882a593Smuzhiyun BTRFS_DEV_STAT_FLUSH_ERRS);
6354*4882a593Smuzhiyun }
6355*4882a593Smuzhiyun }
6356*4882a593Smuzhiyun
6357*4882a593Smuzhiyun if (bio == bbio->orig_bio)
6358*4882a593Smuzhiyun is_orig_bio = 1;
6359*4882a593Smuzhiyun
6360*4882a593Smuzhiyun btrfs_bio_counter_dec(bbio->fs_info);
6361*4882a593Smuzhiyun
6362*4882a593Smuzhiyun if (atomic_dec_and_test(&bbio->stripes_pending)) {
6363*4882a593Smuzhiyun if (!is_orig_bio) {
6364*4882a593Smuzhiyun bio_put(bio);
6365*4882a593Smuzhiyun bio = bbio->orig_bio;
6366*4882a593Smuzhiyun }
6367*4882a593Smuzhiyun
6368*4882a593Smuzhiyun btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6369*4882a593Smuzhiyun /* only send an error to the higher layers if it is
6370*4882a593Smuzhiyun * beyond the tolerance of the btrfs bio
6371*4882a593Smuzhiyun */
6372*4882a593Smuzhiyun if (atomic_read(&bbio->error) > bbio->max_errors) {
6373*4882a593Smuzhiyun bio->bi_status = BLK_STS_IOERR;
6374*4882a593Smuzhiyun } else {
6375*4882a593Smuzhiyun /*
6376*4882a593Smuzhiyun * this bio is actually up to date, we didn't
6377*4882a593Smuzhiyun * go over the max number of errors
6378*4882a593Smuzhiyun */
6379*4882a593Smuzhiyun bio->bi_status = BLK_STS_OK;
6380*4882a593Smuzhiyun }
6381*4882a593Smuzhiyun
6382*4882a593Smuzhiyun btrfs_end_bbio(bbio, bio);
6383*4882a593Smuzhiyun } else if (!is_orig_bio) {
6384*4882a593Smuzhiyun bio_put(bio);
6385*4882a593Smuzhiyun }
6386*4882a593Smuzhiyun }
6387*4882a593Smuzhiyun
6388*4882a593Smuzhiyun static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6389*4882a593Smuzhiyun u64 physical, struct btrfs_device *dev)
6390*4882a593Smuzhiyun {
6391*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = bbio->fs_info;
6392*4882a593Smuzhiyun
6393*4882a593Smuzhiyun bio->bi_private = bbio;
6394*4882a593Smuzhiyun btrfs_io_bio(bio)->device = dev;
6395*4882a593Smuzhiyun bio->bi_end_io = btrfs_end_bio;
6396*4882a593Smuzhiyun bio->bi_iter.bi_sector = physical >> 9;
6397*4882a593Smuzhiyun btrfs_debug_in_rcu(fs_info,
6398*4882a593Smuzhiyun "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6399*4882a593Smuzhiyun bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6400*4882a593Smuzhiyun (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6401*4882a593Smuzhiyun dev->devid, bio->bi_iter.bi_size);
6402*4882a593Smuzhiyun bio_set_dev(bio, dev->bdev);
6403*4882a593Smuzhiyun
6404*4882a593Smuzhiyun btrfs_bio_counter_inc_noblocked(fs_info);
6405*4882a593Smuzhiyun
6406*4882a593Smuzhiyun btrfsic_submit_bio(bio);
6407*4882a593Smuzhiyun }
6408*4882a593Smuzhiyun
6409*4882a593Smuzhiyun static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6410*4882a593Smuzhiyun {
6411*4882a593Smuzhiyun atomic_inc(&bbio->error);
6412*4882a593Smuzhiyun if (atomic_dec_and_test(&bbio->stripes_pending)) {
6413*4882a593Smuzhiyun /* Should be the original bio. */
6414*4882a593Smuzhiyun WARN_ON(bio != bbio->orig_bio);
6415*4882a593Smuzhiyun
6416*4882a593Smuzhiyun btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6417*4882a593Smuzhiyun bio->bi_iter.bi_sector = logical >> 9;
6418*4882a593Smuzhiyun if (atomic_read(&bbio->error) > bbio->max_errors)
6419*4882a593Smuzhiyun bio->bi_status = BLK_STS_IOERR;
6420*4882a593Smuzhiyun else
6421*4882a593Smuzhiyun bio->bi_status = BLK_STS_OK;
6422*4882a593Smuzhiyun btrfs_end_bbio(bbio, bio);
6423*4882a593Smuzhiyun }
6424*4882a593Smuzhiyun }
6425*4882a593Smuzhiyun
6426*4882a593Smuzhiyun blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6427*4882a593Smuzhiyun int mirror_num)
6428*4882a593Smuzhiyun {
6429*4882a593Smuzhiyun struct btrfs_device *dev;
6430*4882a593Smuzhiyun struct bio *first_bio = bio;
6431*4882a593Smuzhiyun u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6432*4882a593Smuzhiyun u64 length = 0;
6433*4882a593Smuzhiyun u64 map_length;
6434*4882a593Smuzhiyun int ret;
6435*4882a593Smuzhiyun int dev_nr;
6436*4882a593Smuzhiyun int total_devs;
6437*4882a593Smuzhiyun struct btrfs_bio *bbio = NULL;
6438*4882a593Smuzhiyun
6439*4882a593Smuzhiyun length = bio->bi_iter.bi_size;
6440*4882a593Smuzhiyun map_length = length;
6441*4882a593Smuzhiyun
6442*4882a593Smuzhiyun btrfs_bio_counter_inc_blocked(fs_info);
6443*4882a593Smuzhiyun ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6444*4882a593Smuzhiyun &map_length, &bbio, mirror_num, 1);
6445*4882a593Smuzhiyun if (ret) {
6446*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
6447*4882a593Smuzhiyun return errno_to_blk_status(ret);
6448*4882a593Smuzhiyun }
6449*4882a593Smuzhiyun
6450*4882a593Smuzhiyun total_devs = bbio->num_stripes;
6451*4882a593Smuzhiyun bbio->orig_bio = first_bio;
6452*4882a593Smuzhiyun bbio->private = first_bio->bi_private;
6453*4882a593Smuzhiyun bbio->end_io = first_bio->bi_end_io;
6454*4882a593Smuzhiyun bbio->fs_info = fs_info;
6455*4882a593Smuzhiyun atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6456*4882a593Smuzhiyun
6457*4882a593Smuzhiyun if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6458*4882a593Smuzhiyun ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6459*4882a593Smuzhiyun /* In this case, map_length has been set to the length of
6460*4882a593Smuzhiyun a single stripe, not the whole write */
6461*4882a593Smuzhiyun if (bio_op(bio) == REQ_OP_WRITE) {
6462*4882a593Smuzhiyun ret = raid56_parity_write(fs_info, bio, bbio,
6463*4882a593Smuzhiyun map_length);
6464*4882a593Smuzhiyun } else {
6465*4882a593Smuzhiyun ret = raid56_parity_recover(fs_info, bio, bbio,
6466*4882a593Smuzhiyun map_length, mirror_num, 1);
6467*4882a593Smuzhiyun }
6468*4882a593Smuzhiyun
6469*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
6470*4882a593Smuzhiyun return errno_to_blk_status(ret);
6471*4882a593Smuzhiyun }
6472*4882a593Smuzhiyun
6473*4882a593Smuzhiyun if (map_length < length) {
6474*4882a593Smuzhiyun btrfs_crit(fs_info,
6475*4882a593Smuzhiyun "mapping failed logical %llu bio len %llu len %llu",
6476*4882a593Smuzhiyun logical, length, map_length);
6477*4882a593Smuzhiyun BUG();
6478*4882a593Smuzhiyun }
6479*4882a593Smuzhiyun
6480*4882a593Smuzhiyun for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6481*4882a593Smuzhiyun dev = bbio->stripes[dev_nr].dev;
6482*4882a593Smuzhiyun if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6483*4882a593Smuzhiyun &dev->dev_state) ||
6484*4882a593Smuzhiyun (bio_op(first_bio) == REQ_OP_WRITE &&
6485*4882a593Smuzhiyun !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6486*4882a593Smuzhiyun bbio_error(bbio, first_bio, logical);
6487*4882a593Smuzhiyun continue;
6488*4882a593Smuzhiyun }
6489*4882a593Smuzhiyun
6490*4882a593Smuzhiyun if (dev_nr < total_devs - 1)
6491*4882a593Smuzhiyun bio = btrfs_bio_clone(first_bio);
6492*4882a593Smuzhiyun else
6493*4882a593Smuzhiyun bio = first_bio;
6494*4882a593Smuzhiyun
6495*4882a593Smuzhiyun submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6496*4882a593Smuzhiyun }
6497*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
6498*4882a593Smuzhiyun return BLK_STS_OK;
6499*4882a593Smuzhiyun }
6500*4882a593Smuzhiyun
6501*4882a593Smuzhiyun /*
6502*4882a593Smuzhiyun * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6503*4882a593Smuzhiyun * return NULL.
6504*4882a593Smuzhiyun *
6505*4882a593Smuzhiyun * If devid and uuid are both specified, the match must be exact, otherwise
6506*4882a593Smuzhiyun * only devid is used.
6507*4882a593Smuzhiyun *
6508*4882a593Smuzhiyun * If @seed is true, traverse through the seed devices.
6509*4882a593Smuzhiyun */
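/*
 * Illustrative lookup (mirrors the call in read_one_chunk()): match a
 * stripe's device strictly by devid and device uuid while accepting any
 * fsid:
 *
 *	dev = btrfs_find_device(fs_info->fs_devices, devid, uuid, NULL, true);
 */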
6510*4882a593Smuzhiyun struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6511*4882a593Smuzhiyun u64 devid, u8 *uuid, u8 *fsid,
6512*4882a593Smuzhiyun bool seed)
6513*4882a593Smuzhiyun {
6514*4882a593Smuzhiyun struct btrfs_device *device;
6515*4882a593Smuzhiyun struct btrfs_fs_devices *seed_devs;
6516*4882a593Smuzhiyun
6517*4882a593Smuzhiyun if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6518*4882a593Smuzhiyun list_for_each_entry(device, &fs_devices->devices, dev_list) {
6519*4882a593Smuzhiyun if (device->devid == devid &&
6520*4882a593Smuzhiyun (!uuid || memcmp(device->uuid, uuid,
6521*4882a593Smuzhiyun BTRFS_UUID_SIZE) == 0))
6522*4882a593Smuzhiyun return device;
6523*4882a593Smuzhiyun }
6524*4882a593Smuzhiyun }
6525*4882a593Smuzhiyun
6526*4882a593Smuzhiyun list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6527*4882a593Smuzhiyun if (!fsid ||
6528*4882a593Smuzhiyun !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6529*4882a593Smuzhiyun list_for_each_entry(device, &seed_devs->devices,
6530*4882a593Smuzhiyun dev_list) {
6531*4882a593Smuzhiyun if (device->devid == devid &&
6532*4882a593Smuzhiyun (!uuid || memcmp(device->uuid, uuid,
6533*4882a593Smuzhiyun BTRFS_UUID_SIZE) == 0))
6534*4882a593Smuzhiyun return device;
6535*4882a593Smuzhiyun }
6536*4882a593Smuzhiyun }
6537*4882a593Smuzhiyun }
6538*4882a593Smuzhiyun
6539*4882a593Smuzhiyun return NULL;
6540*4882a593Smuzhiyun }
6541*4882a593Smuzhiyun
6542*4882a593Smuzhiyun static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6543*4882a593Smuzhiyun u64 devid, u8 *dev_uuid)
6544*4882a593Smuzhiyun {
6545*4882a593Smuzhiyun struct btrfs_device *device;
6546*4882a593Smuzhiyun unsigned int nofs_flag;
6547*4882a593Smuzhiyun
6548*4882a593Smuzhiyun /*
6549*4882a593Smuzhiyun * We call this under the chunk_mutex, so we want to use NOFS for this
6550*4882a593Smuzhiyun * allocation; however, we don't want to change btrfs_alloc_device() to
6551*4882a593Smuzhiyun * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6552*4882a593Smuzhiyun * places.
6553*4882a593Smuzhiyun */
6554*4882a593Smuzhiyun nofs_flag = memalloc_nofs_save();
6555*4882a593Smuzhiyun device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6556*4882a593Smuzhiyun memalloc_nofs_restore(nofs_flag);
6557*4882a593Smuzhiyun if (IS_ERR(device))
6558*4882a593Smuzhiyun return device;
6559*4882a593Smuzhiyun
6560*4882a593Smuzhiyun list_add(&device->dev_list, &fs_devices->devices);
6561*4882a593Smuzhiyun device->fs_devices = fs_devices;
6562*4882a593Smuzhiyun fs_devices->num_devices++;
6563*4882a593Smuzhiyun
6564*4882a593Smuzhiyun set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6565*4882a593Smuzhiyun fs_devices->missing_devices++;
6566*4882a593Smuzhiyun
6567*4882a593Smuzhiyun return device;
6568*4882a593Smuzhiyun }
6569*4882a593Smuzhiyun
6570*4882a593Smuzhiyun /**
6571*4882a593Smuzhiyun * btrfs_alloc_device - allocate struct btrfs_device
6572*4882a593Smuzhiyun * @fs_info: used only for generating a new devid, can be NULL if
6573*4882a593Smuzhiyun * devid is provided (i.e. @devid != NULL).
6574*4882a593Smuzhiyun * @devid: a pointer to devid for this device. If NULL a new devid
6575*4882a593Smuzhiyun * is generated.
6576*4882a593Smuzhiyun * @uuid: a pointer to UUID for this device. If NULL a new UUID
6577*4882a593Smuzhiyun * is generated.
6578*4882a593Smuzhiyun *
6579*4882a593Smuzhiyun * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6580*4882a593Smuzhiyun * on error. Returned struct is not linked onto any lists and must be
6581*4882a593Smuzhiyun * destroyed with btrfs_free_device.
6582*4882a593Smuzhiyun */
6583*4882a593Smuzhiyun struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6584*4882a593Smuzhiyun const u64 *devid,
6585*4882a593Smuzhiyun const u8 *uuid)
6586*4882a593Smuzhiyun {
6587*4882a593Smuzhiyun struct btrfs_device *dev;
6588*4882a593Smuzhiyun u64 tmp;
6589*4882a593Smuzhiyun
6590*4882a593Smuzhiyun if (WARN_ON(!devid && !fs_info))
6591*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
6592*4882a593Smuzhiyun
6593*4882a593Smuzhiyun dev = __alloc_device(fs_info);
6594*4882a593Smuzhiyun if (IS_ERR(dev))
6595*4882a593Smuzhiyun return dev;
6596*4882a593Smuzhiyun
6597*4882a593Smuzhiyun if (devid)
6598*4882a593Smuzhiyun tmp = *devid;
6599*4882a593Smuzhiyun else {
6600*4882a593Smuzhiyun int ret;
6601*4882a593Smuzhiyun
6602*4882a593Smuzhiyun ret = find_next_devid(fs_info, &tmp);
6603*4882a593Smuzhiyun if (ret) {
6604*4882a593Smuzhiyun btrfs_free_device(dev);
6605*4882a593Smuzhiyun return ERR_PTR(ret);
6606*4882a593Smuzhiyun }
6607*4882a593Smuzhiyun }
6608*4882a593Smuzhiyun dev->devid = tmp;
6609*4882a593Smuzhiyun
6610*4882a593Smuzhiyun if (uuid)
6611*4882a593Smuzhiyun memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6612*4882a593Smuzhiyun else
6613*4882a593Smuzhiyun generate_random_uuid(dev->uuid);
6614*4882a593Smuzhiyun
6615*4882a593Smuzhiyun return dev;
6616*4882a593Smuzhiyun }
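/*
 * Illustrative calls to btrfs_alloc_device() above: add_missing_dev() passes
 * an explicit devid and uuid and therefore no fs_info,
 *
 *	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
 *
 * while a brand new device can have both generated:
 *
 *	device = btrfs_alloc_device(fs_info, NULL, NULL);
 */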
6617*4882a593Smuzhiyun
6618*4882a593Smuzhiyun static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6619*4882a593Smuzhiyun u64 devid, u8 *uuid, bool error)
6620*4882a593Smuzhiyun {
6621*4882a593Smuzhiyun if (error)
6622*4882a593Smuzhiyun btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6623*4882a593Smuzhiyun devid, uuid);
6624*4882a593Smuzhiyun else
6625*4882a593Smuzhiyun btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6626*4882a593Smuzhiyun devid, uuid);
6627*4882a593Smuzhiyun }
6628*4882a593Smuzhiyun
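/*
 * E.g. an 8GiB RAID6 chunk striped over 6 devices has nparity = 2, so
 * data_stripes = 4 and each device stripe covers 2GiB; a 1GiB RAID1 chunk
 * over 2 devices has ncopies = 2, so data_stripes = 1 and the stripe
 * length is the full 1GiB.
 */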
6629*4882a593Smuzhiyun static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6630*4882a593Smuzhiyun {
6631*4882a593Smuzhiyun int index = btrfs_bg_flags_to_raid_index(type);
6632*4882a593Smuzhiyun int ncopies = btrfs_raid_array[index].ncopies;
6633*4882a593Smuzhiyun const int nparity = btrfs_raid_array[index].nparity;
6634*4882a593Smuzhiyun int data_stripes;
6635*4882a593Smuzhiyun
6636*4882a593Smuzhiyun if (nparity)
6637*4882a593Smuzhiyun data_stripes = num_stripes - nparity;
6638*4882a593Smuzhiyun else
6639*4882a593Smuzhiyun data_stripes = num_stripes / ncopies;
6640*4882a593Smuzhiyun
6641*4882a593Smuzhiyun return div_u64(chunk_len, data_stripes);
6642*4882a593Smuzhiyun }
6643*4882a593Smuzhiyun
6644*4882a593Smuzhiyun static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6645*4882a593Smuzhiyun struct btrfs_chunk *chunk)
6646*4882a593Smuzhiyun {
6647*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = leaf->fs_info;
6648*4882a593Smuzhiyun struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6649*4882a593Smuzhiyun struct map_lookup *map;
6650*4882a593Smuzhiyun struct extent_map *em;
6651*4882a593Smuzhiyun u64 logical;
6652*4882a593Smuzhiyun u64 length;
6653*4882a593Smuzhiyun u64 devid;
6654*4882a593Smuzhiyun u8 uuid[BTRFS_UUID_SIZE];
6655*4882a593Smuzhiyun int num_stripes;
6656*4882a593Smuzhiyun int ret;
6657*4882a593Smuzhiyun int i;
6658*4882a593Smuzhiyun
6659*4882a593Smuzhiyun logical = key->offset;
6660*4882a593Smuzhiyun length = btrfs_chunk_length(leaf, chunk);
6661*4882a593Smuzhiyun num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6662*4882a593Smuzhiyun
6663*4882a593Smuzhiyun /*
6664*4882a593Smuzhiyun * Only need to verify chunk item if we're reading from sys chunk array,
6665*4882a593Smuzhiyun * as chunk item in tree block is already verified by tree-checker.
6666*4882a593Smuzhiyun */
6667*4882a593Smuzhiyun if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6668*4882a593Smuzhiyun ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6669*4882a593Smuzhiyun if (ret)
6670*4882a593Smuzhiyun return ret;
6671*4882a593Smuzhiyun }
6672*4882a593Smuzhiyun
6673*4882a593Smuzhiyun read_lock(&map_tree->lock);
6674*4882a593Smuzhiyun em = lookup_extent_mapping(map_tree, logical, 1);
6675*4882a593Smuzhiyun read_unlock(&map_tree->lock);
6676*4882a593Smuzhiyun
6677*4882a593Smuzhiyun /* already mapped? */
6678*4882a593Smuzhiyun if (em && em->start <= logical && em->start + em->len > logical) {
6679*4882a593Smuzhiyun free_extent_map(em);
6680*4882a593Smuzhiyun return 0;
6681*4882a593Smuzhiyun } else if (em) {
6682*4882a593Smuzhiyun free_extent_map(em);
6683*4882a593Smuzhiyun }
6684*4882a593Smuzhiyun
6685*4882a593Smuzhiyun em = alloc_extent_map();
6686*4882a593Smuzhiyun if (!em)
6687*4882a593Smuzhiyun return -ENOMEM;
6688*4882a593Smuzhiyun map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6689*4882a593Smuzhiyun if (!map) {
6690*4882a593Smuzhiyun free_extent_map(em);
6691*4882a593Smuzhiyun return -ENOMEM;
6692*4882a593Smuzhiyun }
6693*4882a593Smuzhiyun
6694*4882a593Smuzhiyun set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6695*4882a593Smuzhiyun em->map_lookup = map;
6696*4882a593Smuzhiyun em->start = logical;
6697*4882a593Smuzhiyun em->len = length;
6698*4882a593Smuzhiyun em->orig_start = 0;
6699*4882a593Smuzhiyun em->block_start = 0;
6700*4882a593Smuzhiyun em->block_len = em->len;
6701*4882a593Smuzhiyun
6702*4882a593Smuzhiyun map->num_stripes = num_stripes;
6703*4882a593Smuzhiyun map->io_width = btrfs_chunk_io_width(leaf, chunk);
6704*4882a593Smuzhiyun map->io_align = btrfs_chunk_io_align(leaf, chunk);
6705*4882a593Smuzhiyun map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6706*4882a593Smuzhiyun map->type = btrfs_chunk_type(leaf, chunk);
6707*4882a593Smuzhiyun map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6708*4882a593Smuzhiyun map->verified_stripes = 0;
6709*4882a593Smuzhiyun em->orig_block_len = calc_stripe_length(map->type, em->len,
6710*4882a593Smuzhiyun map->num_stripes);
6711*4882a593Smuzhiyun for (i = 0; i < num_stripes; i++) {
6712*4882a593Smuzhiyun map->stripes[i].physical =
6713*4882a593Smuzhiyun btrfs_stripe_offset_nr(leaf, chunk, i);
6714*4882a593Smuzhiyun devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6715*4882a593Smuzhiyun read_extent_buffer(leaf, uuid, (unsigned long)
6716*4882a593Smuzhiyun btrfs_stripe_dev_uuid_nr(chunk, i),
6717*4882a593Smuzhiyun BTRFS_UUID_SIZE);
6718*4882a593Smuzhiyun map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6719*4882a593Smuzhiyun devid, uuid, NULL, true);
6720*4882a593Smuzhiyun if (!map->stripes[i].dev &&
6721*4882a593Smuzhiyun !btrfs_test_opt(fs_info, DEGRADED)) {
6722*4882a593Smuzhiyun free_extent_map(em);
6723*4882a593Smuzhiyun btrfs_report_missing_device(fs_info, devid, uuid, true);
6724*4882a593Smuzhiyun return -ENOENT;
6725*4882a593Smuzhiyun }
6726*4882a593Smuzhiyun if (!map->stripes[i].dev) {
6727*4882a593Smuzhiyun map->stripes[i].dev =
6728*4882a593Smuzhiyun add_missing_dev(fs_info->fs_devices, devid,
6729*4882a593Smuzhiyun uuid);
6730*4882a593Smuzhiyun if (IS_ERR(map->stripes[i].dev)) {
6731*4882a593Smuzhiyun free_extent_map(em);
6732*4882a593Smuzhiyun btrfs_err(fs_info,
6733*4882a593Smuzhiyun "failed to init missing dev %llu: %ld",
6734*4882a593Smuzhiyun devid, PTR_ERR(map->stripes[i].dev));
6735*4882a593Smuzhiyun return PTR_ERR(map->stripes[i].dev);
6736*4882a593Smuzhiyun }
6737*4882a593Smuzhiyun btrfs_report_missing_device(fs_info, devid, uuid, false);
6738*4882a593Smuzhiyun }
6739*4882a593Smuzhiyun set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6740*4882a593Smuzhiyun &(map->stripes[i].dev->dev_state));
6741*4882a593Smuzhiyun
6742*4882a593Smuzhiyun }
6743*4882a593Smuzhiyun
6744*4882a593Smuzhiyun write_lock(&map_tree->lock);
6745*4882a593Smuzhiyun ret = add_extent_mapping(map_tree, em, 0);
6746*4882a593Smuzhiyun write_unlock(&map_tree->lock);
6747*4882a593Smuzhiyun if (ret < 0) {
6748*4882a593Smuzhiyun btrfs_err(fs_info,
6749*4882a593Smuzhiyun "failed to add chunk map, start=%llu len=%llu: %d",
6750*4882a593Smuzhiyun em->start, em->len, ret);
6751*4882a593Smuzhiyun }
6752*4882a593Smuzhiyun free_extent_map(em);
6753*4882a593Smuzhiyun
6754*4882a593Smuzhiyun return ret;
6755*4882a593Smuzhiyun }
6756*4882a593Smuzhiyun
6757*4882a593Smuzhiyun static void fill_device_from_item(struct extent_buffer *leaf,
6758*4882a593Smuzhiyun struct btrfs_dev_item *dev_item,
6759*4882a593Smuzhiyun struct btrfs_device *device)
6760*4882a593Smuzhiyun {
6761*4882a593Smuzhiyun unsigned long ptr;
6762*4882a593Smuzhiyun
6763*4882a593Smuzhiyun device->devid = btrfs_device_id(leaf, dev_item);
6764*4882a593Smuzhiyun device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6765*4882a593Smuzhiyun device->total_bytes = device->disk_total_bytes;
6766*4882a593Smuzhiyun device->commit_total_bytes = device->disk_total_bytes;
6767*4882a593Smuzhiyun device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6768*4882a593Smuzhiyun device->commit_bytes_used = device->bytes_used;
6769*4882a593Smuzhiyun device->type = btrfs_device_type(leaf, dev_item);
6770*4882a593Smuzhiyun device->io_align = btrfs_device_io_align(leaf, dev_item);
6771*4882a593Smuzhiyun device->io_width = btrfs_device_io_width(leaf, dev_item);
6772*4882a593Smuzhiyun device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6773*4882a593Smuzhiyun WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6774*4882a593Smuzhiyun clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6775*4882a593Smuzhiyun
6776*4882a593Smuzhiyun ptr = btrfs_device_uuid(dev_item);
6777*4882a593Smuzhiyun read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6778*4882a593Smuzhiyun }
6779*4882a593Smuzhiyun
6780*4882a593Smuzhiyun static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6781*4882a593Smuzhiyun u8 *fsid)
6782*4882a593Smuzhiyun {
6783*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices;
6784*4882a593Smuzhiyun int ret;
6785*4882a593Smuzhiyun
6786*4882a593Smuzhiyun lockdep_assert_held(&uuid_mutex);
6787*4882a593Smuzhiyun ASSERT(fsid);
6788*4882a593Smuzhiyun
6789*4882a593Smuzhiyun /* This will match only for multi-device seed fs */
6790*4882a593Smuzhiyun list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6791*4882a593Smuzhiyun if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6792*4882a593Smuzhiyun return fs_devices;
6793*4882a593Smuzhiyun
6794*4882a593Smuzhiyun
6795*4882a593Smuzhiyun fs_devices = find_fsid(fsid, NULL);
6796*4882a593Smuzhiyun if (!fs_devices) {
6797*4882a593Smuzhiyun if (!btrfs_test_opt(fs_info, DEGRADED))
6798*4882a593Smuzhiyun return ERR_PTR(-ENOENT);
6799*4882a593Smuzhiyun
6800*4882a593Smuzhiyun fs_devices = alloc_fs_devices(fsid, NULL);
6801*4882a593Smuzhiyun if (IS_ERR(fs_devices))
6802*4882a593Smuzhiyun return fs_devices;
6803*4882a593Smuzhiyun
6804*4882a593Smuzhiyun fs_devices->seeding = true;
6805*4882a593Smuzhiyun fs_devices->opened = 1;
6806*4882a593Smuzhiyun return fs_devices;
6807*4882a593Smuzhiyun }
6808*4882a593Smuzhiyun
6809*4882a593Smuzhiyun /*
6810*4882a593Smuzhiyun * Upon first call for a seed fs fsid, just create a private copy of the
6811*4882a593Smuzhiyun * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
6812*4882a593Smuzhiyun */
6813*4882a593Smuzhiyun fs_devices = clone_fs_devices(fs_devices);
6814*4882a593Smuzhiyun if (IS_ERR(fs_devices))
6815*4882a593Smuzhiyun return fs_devices;
6816*4882a593Smuzhiyun
6817*4882a593Smuzhiyun ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6818*4882a593Smuzhiyun if (ret) {
6819*4882a593Smuzhiyun free_fs_devices(fs_devices);
6820*4882a593Smuzhiyun return ERR_PTR(ret);
6821*4882a593Smuzhiyun }
6822*4882a593Smuzhiyun
6823*4882a593Smuzhiyun if (!fs_devices->seeding) {
6824*4882a593Smuzhiyun close_fs_devices(fs_devices);
6825*4882a593Smuzhiyun free_fs_devices(fs_devices);
6826*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
6827*4882a593Smuzhiyun }
6828*4882a593Smuzhiyun
6829*4882a593Smuzhiyun list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
6830*4882a593Smuzhiyun
6831*4882a593Smuzhiyun return fs_devices;
6832*4882a593Smuzhiyun }
6833*4882a593Smuzhiyun
6834*4882a593Smuzhiyun static int read_one_dev(struct extent_buffer *leaf,
6835*4882a593Smuzhiyun struct btrfs_dev_item *dev_item)
6836*4882a593Smuzhiyun {
6837*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = leaf->fs_info;
6838*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6839*4882a593Smuzhiyun struct btrfs_device *device;
6840*4882a593Smuzhiyun u64 devid;
6841*4882a593Smuzhiyun int ret;
6842*4882a593Smuzhiyun u8 fs_uuid[BTRFS_FSID_SIZE];
6843*4882a593Smuzhiyun u8 dev_uuid[BTRFS_UUID_SIZE];
6844*4882a593Smuzhiyun
6845*4882a593Smuzhiyun devid = btrfs_device_id(leaf, dev_item);
6846*4882a593Smuzhiyun read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6847*4882a593Smuzhiyun BTRFS_UUID_SIZE);
6848*4882a593Smuzhiyun read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6849*4882a593Smuzhiyun BTRFS_FSID_SIZE);
6850*4882a593Smuzhiyun
6851*4882a593Smuzhiyun if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6852*4882a593Smuzhiyun fs_devices = open_seed_devices(fs_info, fs_uuid);
6853*4882a593Smuzhiyun if (IS_ERR(fs_devices))
6854*4882a593Smuzhiyun return PTR_ERR(fs_devices);
6855*4882a593Smuzhiyun }
6856*4882a593Smuzhiyun
6857*4882a593Smuzhiyun device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6858*4882a593Smuzhiyun fs_uuid, true);
6859*4882a593Smuzhiyun if (!device) {
6860*4882a593Smuzhiyun if (!btrfs_test_opt(fs_info, DEGRADED)) {
6861*4882a593Smuzhiyun btrfs_report_missing_device(fs_info, devid,
6862*4882a593Smuzhiyun dev_uuid, true);
6863*4882a593Smuzhiyun return -ENOENT;
6864*4882a593Smuzhiyun }
6865*4882a593Smuzhiyun
6866*4882a593Smuzhiyun device = add_missing_dev(fs_devices, devid, dev_uuid);
6867*4882a593Smuzhiyun if (IS_ERR(device)) {
6868*4882a593Smuzhiyun btrfs_err(fs_info,
6869*4882a593Smuzhiyun "failed to add missing dev %llu: %ld",
6870*4882a593Smuzhiyun devid, PTR_ERR(device));
6871*4882a593Smuzhiyun return PTR_ERR(device);
6872*4882a593Smuzhiyun }
6873*4882a593Smuzhiyun btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6874*4882a593Smuzhiyun } else {
6875*4882a593Smuzhiyun if (!device->bdev) {
6876*4882a593Smuzhiyun if (!btrfs_test_opt(fs_info, DEGRADED)) {
6877*4882a593Smuzhiyun btrfs_report_missing_device(fs_info,
6878*4882a593Smuzhiyun devid, dev_uuid, true);
6879*4882a593Smuzhiyun return -ENOENT;
6880*4882a593Smuzhiyun }
6881*4882a593Smuzhiyun btrfs_report_missing_device(fs_info, devid,
6882*4882a593Smuzhiyun dev_uuid, false);
6883*4882a593Smuzhiyun }
6884*4882a593Smuzhiyun
6885*4882a593Smuzhiyun if (!device->bdev &&
6886*4882a593Smuzhiyun !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6887*4882a593Smuzhiyun /*
6888*4882a593Smuzhiyun * This happens when a device that was properly set up
6889*4882a593Smuzhiyun * in the device info lists suddenly goes bad.
6890*4882a593Smuzhiyun * device->bdev is NULL, so we have to set the MISSING
6891*4882a593Smuzhiyun * state bit on the device here.
6892*4882a593Smuzhiyun */
6893*4882a593Smuzhiyun device->fs_devices->missing_devices++;
6894*4882a593Smuzhiyun set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6895*4882a593Smuzhiyun }
6896*4882a593Smuzhiyun
6897*4882a593Smuzhiyun /* Move the device to its own fs_devices */
6898*4882a593Smuzhiyun if (device->fs_devices != fs_devices) {
6899*4882a593Smuzhiyun ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6900*4882a593Smuzhiyun &device->dev_state));
6901*4882a593Smuzhiyun
6902*4882a593Smuzhiyun list_move(&device->dev_list, &fs_devices->devices);
6903*4882a593Smuzhiyun device->fs_devices->num_devices--;
6904*4882a593Smuzhiyun fs_devices->num_devices++;
6905*4882a593Smuzhiyun
6906*4882a593Smuzhiyun device->fs_devices->missing_devices--;
6907*4882a593Smuzhiyun fs_devices->missing_devices++;
6908*4882a593Smuzhiyun
6909*4882a593Smuzhiyun device->fs_devices = fs_devices;
6910*4882a593Smuzhiyun }
6911*4882a593Smuzhiyun }
6912*4882a593Smuzhiyun
6913*4882a593Smuzhiyun if (device->fs_devices != fs_info->fs_devices) {
6914*4882a593Smuzhiyun BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6915*4882a593Smuzhiyun if (device->generation !=
6916*4882a593Smuzhiyun btrfs_device_generation(leaf, dev_item))
6917*4882a593Smuzhiyun return -EINVAL;
6918*4882a593Smuzhiyun }
6919*4882a593Smuzhiyun
6920*4882a593Smuzhiyun fill_device_from_item(leaf, dev_item, device);
6921*4882a593Smuzhiyun set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6922*4882a593Smuzhiyun if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6923*4882a593Smuzhiyun !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6924*4882a593Smuzhiyun device->fs_devices->total_rw_bytes += device->total_bytes;
6925*4882a593Smuzhiyun atomic64_add(device->total_bytes - device->bytes_used,
6926*4882a593Smuzhiyun &fs_info->free_chunk_space);
6927*4882a593Smuzhiyun }
6928*4882a593Smuzhiyun return 0;
6930*4882a593Smuzhiyun }
6931*4882a593Smuzhiyun
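/*
 * Read the chunk items packed into the superblock's sys_chunk_array. The
 * superblock copy is written into a temporary extent buffer so the regular
 * chunk parsing helper (read_one_chunk) can be reused on it.
 */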
6932*4882a593Smuzhiyun int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6933*4882a593Smuzhiyun {
6934*4882a593Smuzhiyun struct btrfs_root *root = fs_info->tree_root;
6935*4882a593Smuzhiyun struct btrfs_super_block *super_copy = fs_info->super_copy;
6936*4882a593Smuzhiyun struct extent_buffer *sb;
6937*4882a593Smuzhiyun struct btrfs_disk_key *disk_key;
6938*4882a593Smuzhiyun struct btrfs_chunk *chunk;
6939*4882a593Smuzhiyun u8 *array_ptr;
6940*4882a593Smuzhiyun unsigned long sb_array_offset;
6941*4882a593Smuzhiyun int ret = 0;
6942*4882a593Smuzhiyun u32 num_stripes;
6943*4882a593Smuzhiyun u32 array_size;
6944*4882a593Smuzhiyun u32 len = 0;
6945*4882a593Smuzhiyun u32 cur_offset;
6946*4882a593Smuzhiyun u64 type;
6947*4882a593Smuzhiyun struct btrfs_key key;
6948*4882a593Smuzhiyun
6949*4882a593Smuzhiyun ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6950*4882a593Smuzhiyun /*
6951*4882a593Smuzhiyun * This will create an extent buffer of nodesize; the superblock size is
6952*4882a593Smuzhiyun * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this
6953*4882a593Smuzhiyun * overallocates, but we can keep it as-is since only the first page is used.
6954*4882a593Smuzhiyun */
6955*4882a593Smuzhiyun sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6956*4882a593Smuzhiyun if (IS_ERR(sb))
6957*4882a593Smuzhiyun return PTR_ERR(sb);
6958*4882a593Smuzhiyun set_extent_buffer_uptodate(sb);
6959*4882a593Smuzhiyun btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6960*4882a593Smuzhiyun /*
6961*4882a593Smuzhiyun * The sb extent buffer is artificial and just used to read the system array.
6962*4882a593Smuzhiyun * The set_extent_buffer_uptodate() call does not properly mark all of its
6963*4882a593Smuzhiyun * pages up-to-date when the page is larger: the extent does not cover the
6964*4882a593Smuzhiyun * whole page and consequently check_page_uptodate does not find all
6965*4882a593Smuzhiyun * the page's extents up-to-date (the hole beyond sb),
6966*4882a593Smuzhiyun * write_extent_buffer then triggers a WARN_ON.
6967*4882a593Smuzhiyun *
6968*4882a593Smuzhiyun * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6969*4882a593Smuzhiyun * but sb spans only this function. Add an explicit SetPageUptodate call
6970*4882a593Smuzhiyun * to silence the warning e.g. on PowerPC 64.
6971*4882a593Smuzhiyun */
6972*4882a593Smuzhiyun if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6973*4882a593Smuzhiyun SetPageUptodate(sb->pages[0]);
6974*4882a593Smuzhiyun
6975*4882a593Smuzhiyun write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6976*4882a593Smuzhiyun array_size = btrfs_super_sys_array_size(super_copy);
6977*4882a593Smuzhiyun
6978*4882a593Smuzhiyun array_ptr = super_copy->sys_chunk_array;
6979*4882a593Smuzhiyun sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6980*4882a593Smuzhiyun cur_offset = 0;
6981*4882a593Smuzhiyun
6982*4882a593Smuzhiyun while (cur_offset < array_size) {
6983*4882a593Smuzhiyun disk_key = (struct btrfs_disk_key *)array_ptr;
6984*4882a593Smuzhiyun len = sizeof(*disk_key);
6985*4882a593Smuzhiyun if (cur_offset + len > array_size)
6986*4882a593Smuzhiyun goto out_short_read;
6987*4882a593Smuzhiyun
6988*4882a593Smuzhiyun btrfs_disk_key_to_cpu(&key, disk_key);
6989*4882a593Smuzhiyun
6990*4882a593Smuzhiyun array_ptr += len;
6991*4882a593Smuzhiyun sb_array_offset += len;
6992*4882a593Smuzhiyun cur_offset += len;
6993*4882a593Smuzhiyun
6994*4882a593Smuzhiyun if (key.type != BTRFS_CHUNK_ITEM_KEY) {
6995*4882a593Smuzhiyun btrfs_err(fs_info,
6996*4882a593Smuzhiyun "unexpected item type %u in sys_array at offset %u",
6997*4882a593Smuzhiyun (u32)key.type, cur_offset);
6998*4882a593Smuzhiyun ret = -EIO;
6999*4882a593Smuzhiyun break;
7000*4882a593Smuzhiyun }
7001*4882a593Smuzhiyun
7002*4882a593Smuzhiyun chunk = (struct btrfs_chunk *)sb_array_offset;
7003*4882a593Smuzhiyun /*
7004*4882a593Smuzhiyun * At least one btrfs_chunk with one stripe must be present,
7005*4882a593Smuzhiyun * exact stripe count check comes afterwards
7006*4882a593Smuzhiyun */
7007*4882a593Smuzhiyun len = btrfs_chunk_item_size(1);
7008*4882a593Smuzhiyun if (cur_offset + len > array_size)
7009*4882a593Smuzhiyun goto out_short_read;
7010*4882a593Smuzhiyun
7011*4882a593Smuzhiyun num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7012*4882a593Smuzhiyun if (!num_stripes) {
7013*4882a593Smuzhiyun btrfs_err(fs_info,
7014*4882a593Smuzhiyun "invalid number of stripes %u in sys_array at offset %u",
7015*4882a593Smuzhiyun num_stripes, cur_offset);
7016*4882a593Smuzhiyun ret = -EIO;
7017*4882a593Smuzhiyun break;
7018*4882a593Smuzhiyun }
7019*4882a593Smuzhiyun
7020*4882a593Smuzhiyun type = btrfs_chunk_type(sb, chunk);
7021*4882a593Smuzhiyun if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7022*4882a593Smuzhiyun btrfs_err(fs_info,
7023*4882a593Smuzhiyun "invalid chunk type %llu in sys_array at offset %u",
7024*4882a593Smuzhiyun type, cur_offset);
7025*4882a593Smuzhiyun ret = -EIO;
7026*4882a593Smuzhiyun break;
7027*4882a593Smuzhiyun }
7028*4882a593Smuzhiyun
7029*4882a593Smuzhiyun len = btrfs_chunk_item_size(num_stripes);
7030*4882a593Smuzhiyun if (cur_offset + len > array_size)
7031*4882a593Smuzhiyun goto out_short_read;
7032*4882a593Smuzhiyun
7033*4882a593Smuzhiyun ret = read_one_chunk(&key, sb, chunk);
7034*4882a593Smuzhiyun if (ret)
7035*4882a593Smuzhiyun break;
7036*4882a593Smuzhiyun
7037*4882a593Smuzhiyun array_ptr += len;
7038*4882a593Smuzhiyun sb_array_offset += len;
7039*4882a593Smuzhiyun cur_offset += len;
7040*4882a593Smuzhiyun }
7041*4882a593Smuzhiyun clear_extent_buffer_uptodate(sb);
7042*4882a593Smuzhiyun free_extent_buffer_stale(sb);
7043*4882a593Smuzhiyun return ret;
7044*4882a593Smuzhiyun
7045*4882a593Smuzhiyun out_short_read:
7046*4882a593Smuzhiyun btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7047*4882a593Smuzhiyun len, cur_offset);
7048*4882a593Smuzhiyun clear_extent_buffer_uptodate(sb);
7049*4882a593Smuzhiyun free_extent_buffer_stale(sb);
7050*4882a593Smuzhiyun return -EIO;
7051*4882a593Smuzhiyun }
7052*4882a593Smuzhiyun
7053*4882a593Smuzhiyun /*
7054*4882a593Smuzhiyun * Check if all chunks in the fs are OK for read-write degraded mount
7055*4882a593Smuzhiyun *
7056*4882a593Smuzhiyun * If the @failing_dev is specified, it's accounted as missing.
7057*4882a593Smuzhiyun *
7058*4882a593Smuzhiyun * Return true if all chunks meet the minimal RW mount requirements.
7059*4882a593Smuzhiyun * Return false if any chunk doesn't meet the minimal RW mount requirements.
7060*4882a593Smuzhiyun */
7061*4882a593Smuzhiyun bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7062*4882a593Smuzhiyun struct btrfs_device *failing_dev)
7063*4882a593Smuzhiyun {
7064*4882a593Smuzhiyun struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7065*4882a593Smuzhiyun struct extent_map *em;
7066*4882a593Smuzhiyun u64 next_start = 0;
7067*4882a593Smuzhiyun bool ret = true;
7068*4882a593Smuzhiyun
7069*4882a593Smuzhiyun read_lock(&map_tree->lock);
7070*4882a593Smuzhiyun em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7071*4882a593Smuzhiyun read_unlock(&map_tree->lock);
7072*4882a593Smuzhiyun /* No chunk at all? Return false anyway */
7073*4882a593Smuzhiyun if (!em) {
7074*4882a593Smuzhiyun ret = false;
7075*4882a593Smuzhiyun goto out;
7076*4882a593Smuzhiyun }
7077*4882a593Smuzhiyun while (em) {
7078*4882a593Smuzhiyun struct map_lookup *map;
7079*4882a593Smuzhiyun int missing = 0;
7080*4882a593Smuzhiyun int max_tolerated;
7081*4882a593Smuzhiyun int i;
7082*4882a593Smuzhiyun
7083*4882a593Smuzhiyun map = em->map_lookup;
7084*4882a593Smuzhiyun max_tolerated =
7085*4882a593Smuzhiyun btrfs_get_num_tolerated_disk_barrier_failures(
7086*4882a593Smuzhiyun map->type);
7087*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
7088*4882a593Smuzhiyun struct btrfs_device *dev = map->stripes[i].dev;
7089*4882a593Smuzhiyun
7090*4882a593Smuzhiyun if (!dev || !dev->bdev ||
7091*4882a593Smuzhiyun test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7092*4882a593Smuzhiyun dev->last_flush_error)
7093*4882a593Smuzhiyun missing++;
7094*4882a593Smuzhiyun else if (failing_dev && failing_dev == dev)
7095*4882a593Smuzhiyun missing++;
7096*4882a593Smuzhiyun }
7097*4882a593Smuzhiyun if (missing > max_tolerated) {
7098*4882a593Smuzhiyun if (!failing_dev)
7099*4882a593Smuzhiyun btrfs_warn(fs_info,
7100*4882a593Smuzhiyun "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7101*4882a593Smuzhiyun em->start, missing, max_tolerated);
7102*4882a593Smuzhiyun free_extent_map(em);
7103*4882a593Smuzhiyun ret = false;
7104*4882a593Smuzhiyun goto out;
7105*4882a593Smuzhiyun }
7106*4882a593Smuzhiyun next_start = extent_map_end(em);
7107*4882a593Smuzhiyun free_extent_map(em);
7108*4882a593Smuzhiyun
7109*4882a593Smuzhiyun read_lock(&map_tree->lock);
7110*4882a593Smuzhiyun em = lookup_extent_mapping(map_tree, next_start,
7111*4882a593Smuzhiyun (u64)(-1) - next_start);
7112*4882a593Smuzhiyun read_unlock(&map_tree->lock);
7113*4882a593Smuzhiyun }
7114*4882a593Smuzhiyun out:
7115*4882a593Smuzhiyun return ret;
7116*4882a593Smuzhiyun }
7117*4882a593Smuzhiyun
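/* Trigger readahead for all child blocks referenced by the given node. */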
7118*4882a593Smuzhiyun static void readahead_tree_node_children(struct extent_buffer *node)
7119*4882a593Smuzhiyun {
7120*4882a593Smuzhiyun int i;
7121*4882a593Smuzhiyun const int nr_items = btrfs_header_nritems(node);
7122*4882a593Smuzhiyun
7123*4882a593Smuzhiyun for (i = 0; i < nr_items; i++) {
7124*4882a593Smuzhiyun u64 start;
7125*4882a593Smuzhiyun
7126*4882a593Smuzhiyun start = btrfs_node_blockptr(node, i);
7127*4882a593Smuzhiyun readahead_tree_block(node->fs_info, start);
7128*4882a593Smuzhiyun }
7129*4882a593Smuzhiyun }
7130*4882a593Smuzhiyun
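/*
 * Load the chunk tree at mount time: first every DEV_ITEM (read_one_dev),
 * then every CHUNK_ITEM (read_one_chunk), followed by consistency checks
 * of the device count and total size against the superblock.
 */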
7131*4882a593Smuzhiyun int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7132*4882a593Smuzhiyun {
7133*4882a593Smuzhiyun struct btrfs_root *root = fs_info->chunk_root;
7134*4882a593Smuzhiyun struct btrfs_path *path;
7135*4882a593Smuzhiyun struct extent_buffer *leaf;
7136*4882a593Smuzhiyun struct btrfs_key key;
7137*4882a593Smuzhiyun struct btrfs_key found_key;
7138*4882a593Smuzhiyun int ret;
7139*4882a593Smuzhiyun int slot;
7140*4882a593Smuzhiyun u64 total_dev = 0;
7141*4882a593Smuzhiyun u64 last_ra_node = 0;
7142*4882a593Smuzhiyun
7143*4882a593Smuzhiyun path = btrfs_alloc_path();
7144*4882a593Smuzhiyun if (!path)
7145*4882a593Smuzhiyun return -ENOMEM;
7146*4882a593Smuzhiyun
7147*4882a593Smuzhiyun /*
7148*4882a593Smuzhiyun * uuid_mutex is needed only if we are mounting a sprout FS (one grown
7149*4882a593Smuzhiyun * from a seed filesystem), otherwise we don't need it.
7150*4882a593Smuzhiyun */
7151*4882a593Smuzhiyun mutex_lock(&uuid_mutex);
7152*4882a593Smuzhiyun
7153*4882a593Smuzhiyun /*
7154*4882a593Smuzhiyun * It is possible for mount and umount to race in such a way that
7155*4882a593Smuzhiyun * we execute this code path, but open_fs_devices failed to clear
7156*4882a593Smuzhiyun * total_rw_bytes. We certainly want it cleared before reading the
7157*4882a593Smuzhiyun * device items, so clear it here.
7158*4882a593Smuzhiyun */
7159*4882a593Smuzhiyun fs_info->fs_devices->total_rw_bytes = 0;
7160*4882a593Smuzhiyun
7161*4882a593Smuzhiyun /*
7162*4882a593Smuzhiyun * Read all device items, and then all the chunk items. All
7163*4882a593Smuzhiyun * device items are found before any chunk item (their object id
7164*4882a593Smuzhiyun * is smaller than the lowest possible object id for a chunk
7165*4882a593Smuzhiyun * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7166*4882a593Smuzhiyun */
7167*4882a593Smuzhiyun key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7168*4882a593Smuzhiyun key.offset = 0;
7169*4882a593Smuzhiyun key.type = 0;
7170*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7171*4882a593Smuzhiyun if (ret < 0)
7172*4882a593Smuzhiyun goto error;
7173*4882a593Smuzhiyun while (1) {
7174*4882a593Smuzhiyun struct extent_buffer *node;
7175*4882a593Smuzhiyun
7176*4882a593Smuzhiyun leaf = path->nodes[0];
7177*4882a593Smuzhiyun slot = path->slots[0];
7178*4882a593Smuzhiyun if (slot >= btrfs_header_nritems(leaf)) {
7179*4882a593Smuzhiyun ret = btrfs_next_leaf(root, path);
7180*4882a593Smuzhiyun if (ret == 0)
7181*4882a593Smuzhiyun continue;
7182*4882a593Smuzhiyun if (ret < 0)
7183*4882a593Smuzhiyun goto error;
7184*4882a593Smuzhiyun break;
7185*4882a593Smuzhiyun }
7186*4882a593Smuzhiyun /*
7187*4882a593Smuzhiyun * The nodes on level 1 are not locked but we don't need to do
7188*4882a593Smuzhiyun * that during mount time as nothing else can access the tree
7189*4882a593Smuzhiyun */
7190*4882a593Smuzhiyun node = path->nodes[1];
7191*4882a593Smuzhiyun if (node) {
7192*4882a593Smuzhiyun if (last_ra_node != node->start) {
7193*4882a593Smuzhiyun readahead_tree_node_children(node);
7194*4882a593Smuzhiyun last_ra_node = node->start;
7195*4882a593Smuzhiyun }
7196*4882a593Smuzhiyun }
7197*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &found_key, slot);
7198*4882a593Smuzhiyun if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7199*4882a593Smuzhiyun struct btrfs_dev_item *dev_item;
7200*4882a593Smuzhiyun dev_item = btrfs_item_ptr(leaf, slot,
7201*4882a593Smuzhiyun struct btrfs_dev_item);
7202*4882a593Smuzhiyun ret = read_one_dev(leaf, dev_item);
7203*4882a593Smuzhiyun if (ret)
7204*4882a593Smuzhiyun goto error;
7205*4882a593Smuzhiyun total_dev++;
7206*4882a593Smuzhiyun } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7207*4882a593Smuzhiyun struct btrfs_chunk *chunk;
7208*4882a593Smuzhiyun chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7209*4882a593Smuzhiyun mutex_lock(&fs_info->chunk_mutex);
7210*4882a593Smuzhiyun ret = read_one_chunk(&found_key, leaf, chunk);
7211*4882a593Smuzhiyun mutex_unlock(&fs_info->chunk_mutex);
7212*4882a593Smuzhiyun if (ret)
7213*4882a593Smuzhiyun goto error;
7214*4882a593Smuzhiyun }
7215*4882a593Smuzhiyun path->slots[0]++;
7216*4882a593Smuzhiyun }
7217*4882a593Smuzhiyun
7218*4882a593Smuzhiyun /*
7219*4882a593Smuzhiyun * After loading chunk tree, we've got all device information,
7220*4882a593Smuzhiyun * do another round of validation checks.
7221*4882a593Smuzhiyun */
7222*4882a593Smuzhiyun if (total_dev != fs_info->fs_devices->total_devices) {
7223*4882a593Smuzhiyun btrfs_warn(fs_info,
7224*4882a593Smuzhiyun "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7225*4882a593Smuzhiyun btrfs_super_num_devices(fs_info->super_copy),
7226*4882a593Smuzhiyun total_dev);
7227*4882a593Smuzhiyun fs_info->fs_devices->total_devices = total_dev;
7228*4882a593Smuzhiyun btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7229*4882a593Smuzhiyun }
7230*4882a593Smuzhiyun if (btrfs_super_total_bytes(fs_info->super_copy) <
7231*4882a593Smuzhiyun fs_info->fs_devices->total_rw_bytes) {
7232*4882a593Smuzhiyun btrfs_err(fs_info,
7233*4882a593Smuzhiyun "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7234*4882a593Smuzhiyun btrfs_super_total_bytes(fs_info->super_copy),
7235*4882a593Smuzhiyun fs_info->fs_devices->total_rw_bytes);
7236*4882a593Smuzhiyun ret = -EINVAL;
7237*4882a593Smuzhiyun goto error;
7238*4882a593Smuzhiyun }
7239*4882a593Smuzhiyun ret = 0;
7240*4882a593Smuzhiyun error:
7241*4882a593Smuzhiyun mutex_unlock(&uuid_mutex);
7242*4882a593Smuzhiyun
7243*4882a593Smuzhiyun btrfs_free_path(path);
7244*4882a593Smuzhiyun return ret;
7245*4882a593Smuzhiyun }
7246*4882a593Smuzhiyun
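/* Point every device, including seed devices, at the now-ready fs_info. */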
7247*4882a593Smuzhiyun void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7248*4882a593Smuzhiyun {
7249*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7250*4882a593Smuzhiyun struct btrfs_device *device;
7251*4882a593Smuzhiyun
7252*4882a593Smuzhiyun fs_devices->fs_info = fs_info;
7253*4882a593Smuzhiyun
7254*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
7255*4882a593Smuzhiyun list_for_each_entry(device, &fs_devices->devices, dev_list)
7256*4882a593Smuzhiyun device->fs_info = fs_info;
7257*4882a593Smuzhiyun
7258*4882a593Smuzhiyun list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7259*4882a593Smuzhiyun list_for_each_entry(device, &seed_devs->devices, dev_list)
7260*4882a593Smuzhiyun device->fs_info = fs_info;
7261*4882a593Smuzhiyun
7262*4882a593Smuzhiyun seed_devs->fs_info = fs_info;
7263*4882a593Smuzhiyun }
7264*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
7265*4882a593Smuzhiyun }
7266*4882a593Smuzhiyun
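/* Read one persisted error counter out of a dev_stats item. */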
7267*4882a593Smuzhiyun static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7268*4882a593Smuzhiyun const struct btrfs_dev_stats_item *ptr,
7269*4882a593Smuzhiyun int index)
7270*4882a593Smuzhiyun {
7271*4882a593Smuzhiyun u64 val;
7272*4882a593Smuzhiyun
7273*4882a593Smuzhiyun read_extent_buffer(eb, &val,
7274*4882a593Smuzhiyun offsetof(struct btrfs_dev_stats_item, values) +
7275*4882a593Smuzhiyun ((unsigned long)ptr) + (index * sizeof(u64)),
7276*4882a593Smuzhiyun sizeof(val));
7277*4882a593Smuzhiyun return val;
7278*4882a593Smuzhiyun }
7279*4882a593Smuzhiyun
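/* Store one error counter into a dev_stats item in the given leaf. */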
7280*4882a593Smuzhiyun static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7281*4882a593Smuzhiyun struct btrfs_dev_stats_item *ptr,
7282*4882a593Smuzhiyun int index, u64 val)
7283*4882a593Smuzhiyun {
7284*4882a593Smuzhiyun write_extent_buffer(eb, &val,
7285*4882a593Smuzhiyun offsetof(struct btrfs_dev_stats_item, values) +
7286*4882a593Smuzhiyun ((unsigned long)ptr) + (index * sizeof(u64)),
7287*4882a593Smuzhiyun sizeof(val));
7288*4882a593Smuzhiyun }
7289*4882a593Smuzhiyun
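/*
 * Load the persistent error counters of one device from the device tree.
 * If no dev_stats item exists yet, all counters start at zero and the
 * stats are still marked valid.
 */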
7290*4882a593Smuzhiyun static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7291*4882a593Smuzhiyun struct btrfs_path *path)
7292*4882a593Smuzhiyun {
7293*4882a593Smuzhiyun struct btrfs_dev_stats_item *ptr;
7294*4882a593Smuzhiyun struct extent_buffer *eb;
7295*4882a593Smuzhiyun struct btrfs_key key;
7296*4882a593Smuzhiyun int item_size;
7297*4882a593Smuzhiyun int i, ret, slot;
7298*4882a593Smuzhiyun
7299*4882a593Smuzhiyun key.objectid = BTRFS_DEV_STATS_OBJECTID;
7300*4882a593Smuzhiyun key.type = BTRFS_PERSISTENT_ITEM_KEY;
7301*4882a593Smuzhiyun key.offset = device->devid;
7302*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7303*4882a593Smuzhiyun if (ret) {
7304*4882a593Smuzhiyun for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7305*4882a593Smuzhiyun btrfs_dev_stat_set(device, i, 0);
7306*4882a593Smuzhiyun device->dev_stats_valid = 1;
7307*4882a593Smuzhiyun btrfs_release_path(path);
7308*4882a593Smuzhiyun return ret < 0 ? ret : 0;
7309*4882a593Smuzhiyun }
7310*4882a593Smuzhiyun slot = path->slots[0];
7311*4882a593Smuzhiyun eb = path->nodes[0];
7312*4882a593Smuzhiyun item_size = btrfs_item_size_nr(eb, slot);
7313*4882a593Smuzhiyun
7314*4882a593Smuzhiyun ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7315*4882a593Smuzhiyun
7316*4882a593Smuzhiyun for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7317*4882a593Smuzhiyun if (item_size >= (1 + i) * sizeof(__le64))
7318*4882a593Smuzhiyun btrfs_dev_stat_set(device, i,
7319*4882a593Smuzhiyun btrfs_dev_stats_value(eb, ptr, i));
7320*4882a593Smuzhiyun else
7321*4882a593Smuzhiyun btrfs_dev_stat_set(device, i, 0);
7322*4882a593Smuzhiyun }
7323*4882a593Smuzhiyun
7324*4882a593Smuzhiyun device->dev_stats_valid = 1;
7325*4882a593Smuzhiyun btrfs_dev_stat_print_on_load(device);
7326*4882a593Smuzhiyun btrfs_release_path(path);
7327*4882a593Smuzhiyun
7328*4882a593Smuzhiyun return 0;
7329*4882a593Smuzhiyun }
7330*4882a593Smuzhiyun
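/* Load dev stats for all devices, including those of seed filesystems. */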
7331*4882a593Smuzhiyun int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7332*4882a593Smuzhiyun {
7333*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7334*4882a593Smuzhiyun struct btrfs_device *device;
7335*4882a593Smuzhiyun struct btrfs_path *path = NULL;
7336*4882a593Smuzhiyun int ret = 0;
7337*4882a593Smuzhiyun
7338*4882a593Smuzhiyun path = btrfs_alloc_path();
7339*4882a593Smuzhiyun if (!path)
7340*4882a593Smuzhiyun return -ENOMEM;
7341*4882a593Smuzhiyun
7342*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
7343*4882a593Smuzhiyun list_for_each_entry(device, &fs_devices->devices, dev_list) {
7344*4882a593Smuzhiyun ret = btrfs_device_init_dev_stats(device, path);
7345*4882a593Smuzhiyun if (ret)
7346*4882a593Smuzhiyun goto out;
7347*4882a593Smuzhiyun }
7348*4882a593Smuzhiyun list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7349*4882a593Smuzhiyun list_for_each_entry(device, &seed_devs->devices, dev_list) {
7350*4882a593Smuzhiyun ret = btrfs_device_init_dev_stats(device, path);
7351*4882a593Smuzhiyun if (ret)
7352*4882a593Smuzhiyun goto out;
7353*4882a593Smuzhiyun }
7354*4882a593Smuzhiyun }
7355*4882a593Smuzhiyun out:
7356*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
7357*4882a593Smuzhiyun
7358*4882a593Smuzhiyun btrfs_free_path(path);
7359*4882a593Smuzhiyun return ret;
7360*4882a593Smuzhiyun }
7361*4882a593Smuzhiyun
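/*
 * Write the in-memory error counters of one device back as a dev_stats
 * item. An existing item that is too small to hold all counters is
 * deleted and re-created at the current size.
 */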
7362*4882a593Smuzhiyun static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7363*4882a593Smuzhiyun struct btrfs_device *device)
7364*4882a593Smuzhiyun {
7365*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
7366*4882a593Smuzhiyun struct btrfs_root *dev_root = fs_info->dev_root;
7367*4882a593Smuzhiyun struct btrfs_path *path;
7368*4882a593Smuzhiyun struct btrfs_key key;
7369*4882a593Smuzhiyun struct extent_buffer *eb;
7370*4882a593Smuzhiyun struct btrfs_dev_stats_item *ptr;
7371*4882a593Smuzhiyun int ret;
7372*4882a593Smuzhiyun int i;
7373*4882a593Smuzhiyun
7374*4882a593Smuzhiyun key.objectid = BTRFS_DEV_STATS_OBJECTID;
7375*4882a593Smuzhiyun key.type = BTRFS_PERSISTENT_ITEM_KEY;
7376*4882a593Smuzhiyun key.offset = device->devid;
7377*4882a593Smuzhiyun
7378*4882a593Smuzhiyun path = btrfs_alloc_path();
7379*4882a593Smuzhiyun if (!path)
7380*4882a593Smuzhiyun return -ENOMEM;
7381*4882a593Smuzhiyun ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7382*4882a593Smuzhiyun if (ret < 0) {
7383*4882a593Smuzhiyun btrfs_warn_in_rcu(fs_info,
7384*4882a593Smuzhiyun "error %d while searching for dev_stats item for device %s",
7385*4882a593Smuzhiyun ret, rcu_str_deref(device->name));
7386*4882a593Smuzhiyun goto out;
7387*4882a593Smuzhiyun }
7388*4882a593Smuzhiyun
7389*4882a593Smuzhiyun if (ret == 0 &&
7390*4882a593Smuzhiyun btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7391*4882a593Smuzhiyun /* need to delete old one and insert a new one */
7392*4882a593Smuzhiyun ret = btrfs_del_item(trans, dev_root, path);
7393*4882a593Smuzhiyun if (ret != 0) {
7394*4882a593Smuzhiyun btrfs_warn_in_rcu(fs_info,
7395*4882a593Smuzhiyun "delete too small dev_stats item for device %s failed %d",
7396*4882a593Smuzhiyun rcu_str_deref(device->name), ret);
7397*4882a593Smuzhiyun goto out;
7398*4882a593Smuzhiyun }
7399*4882a593Smuzhiyun ret = 1;
7400*4882a593Smuzhiyun }
7401*4882a593Smuzhiyun
7402*4882a593Smuzhiyun if (ret == 1) {
7403*4882a593Smuzhiyun /* need to insert a new item */
7404*4882a593Smuzhiyun btrfs_release_path(path);
7405*4882a593Smuzhiyun ret = btrfs_insert_empty_item(trans, dev_root, path,
7406*4882a593Smuzhiyun &key, sizeof(*ptr));
7407*4882a593Smuzhiyun if (ret < 0) {
7408*4882a593Smuzhiyun btrfs_warn_in_rcu(fs_info,
7409*4882a593Smuzhiyun "insert dev_stats item for device %s failed %d",
7410*4882a593Smuzhiyun rcu_str_deref(device->name), ret);
7411*4882a593Smuzhiyun goto out;
7412*4882a593Smuzhiyun }
7413*4882a593Smuzhiyun }
7414*4882a593Smuzhiyun
7415*4882a593Smuzhiyun eb = path->nodes[0];
7416*4882a593Smuzhiyun ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7417*4882a593Smuzhiyun for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7418*4882a593Smuzhiyun btrfs_set_dev_stats_value(eb, ptr, i,
7419*4882a593Smuzhiyun btrfs_dev_stat_read(device, i));
7420*4882a593Smuzhiyun btrfs_mark_buffer_dirty(eb);
7421*4882a593Smuzhiyun
7422*4882a593Smuzhiyun out:
7423*4882a593Smuzhiyun btrfs_free_path(path);
7424*4882a593Smuzhiyun return ret;
7425*4882a593Smuzhiyun }
7426*4882a593Smuzhiyun
7427*4882a593Smuzhiyun /*
7428*4882a593Smuzhiyun * Called from commit_transaction(). Writes all changed device stats to disk.
7429*4882a593Smuzhiyun */
7430*4882a593Smuzhiyun int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7431*4882a593Smuzhiyun {
7432*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
7433*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7434*4882a593Smuzhiyun struct btrfs_device *device;
7435*4882a593Smuzhiyun int stats_cnt;
7436*4882a593Smuzhiyun int ret = 0;
7437*4882a593Smuzhiyun
7438*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
7439*4882a593Smuzhiyun list_for_each_entry(device, &fs_devices->devices, dev_list) {
7440*4882a593Smuzhiyun stats_cnt = atomic_read(&device->dev_stats_ccnt);
7441*4882a593Smuzhiyun if (!device->dev_stats_valid || stats_cnt == 0)
7442*4882a593Smuzhiyun continue;
7443*4882a593Smuzhiyun
7445*4882a593Smuzhiyun /*
7446*4882a593Smuzhiyun * There is a LOAD-LOAD control dependency between the value of
7447*4882a593Smuzhiyun * dev_stats_ccnt and updating the on-disk values which requires
7448*4882a593Smuzhiyun * reading the in-memory counters. Such control dependencies
7449*4882a593Smuzhiyun * require explicit read memory barriers.
7450*4882a593Smuzhiyun *
7451*4882a593Smuzhiyun * This memory barrier pairs with smp_mb__before_atomic in
7452*4882a593Smuzhiyun * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7453*4882a593Smuzhiyun * barrier implied by atomic_xchg in
7454*4882a593Smuzhiyun * btrfs_dev_stats_read_and_reset
7455*4882a593Smuzhiyun */
7456*4882a593Smuzhiyun smp_rmb();
7457*4882a593Smuzhiyun
7458*4882a593Smuzhiyun ret = update_dev_stat_item(trans, device);
7459*4882a593Smuzhiyun if (!ret)
7460*4882a593Smuzhiyun atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7461*4882a593Smuzhiyun }
7462*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
7463*4882a593Smuzhiyun
7464*4882a593Smuzhiyun return ret;
7465*4882a593Smuzhiyun }
7466*4882a593Smuzhiyun
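/* Bump one error counter and print the (rate-limited) per-device summary. */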
7467*4882a593Smuzhiyun void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7468*4882a593Smuzhiyun {
7469*4882a593Smuzhiyun btrfs_dev_stat_inc(dev, index);
7470*4882a593Smuzhiyun btrfs_dev_stat_print_on_error(dev);
7471*4882a593Smuzhiyun }
7472*4882a593Smuzhiyun
7473*4882a593Smuzhiyun static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7474*4882a593Smuzhiyun {
7475*4882a593Smuzhiyun if (!dev->dev_stats_valid)
7476*4882a593Smuzhiyun return;
7477*4882a593Smuzhiyun btrfs_err_rl_in_rcu(dev->fs_info,
7478*4882a593Smuzhiyun "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7479*4882a593Smuzhiyun rcu_str_deref(dev->name),
7480*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7481*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7482*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7483*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7484*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7485*4882a593Smuzhiyun }
7486*4882a593Smuzhiyun
7487*4882a593Smuzhiyun static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7488*4882a593Smuzhiyun {
7489*4882a593Smuzhiyun int i;
7490*4882a593Smuzhiyun
7491*4882a593Smuzhiyun for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7492*4882a593Smuzhiyun if (btrfs_dev_stat_read(dev, i) != 0)
7493*4882a593Smuzhiyun break;
7494*4882a593Smuzhiyun if (i == BTRFS_DEV_STAT_VALUES_MAX)
7495*4882a593Smuzhiyun return; /* all values == 0, suppress message */
7496*4882a593Smuzhiyun
7497*4882a593Smuzhiyun btrfs_info_in_rcu(dev->fs_info,
7498*4882a593Smuzhiyun "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7499*4882a593Smuzhiyun rcu_str_deref(dev->name),
7500*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7501*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7502*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7503*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7504*4882a593Smuzhiyun btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7505*4882a593Smuzhiyun }
7506*4882a593Smuzhiyun
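/*
 * Back end of the dev stats ioctl: copy the error counters of the requested
 * device to the caller, optionally resetting them when BTRFS_DEV_STATS_RESET
 * is set.
 */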
7507*4882a593Smuzhiyun int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7508*4882a593Smuzhiyun struct btrfs_ioctl_get_dev_stats *stats)
7509*4882a593Smuzhiyun {
7510*4882a593Smuzhiyun struct btrfs_device *dev;
7511*4882a593Smuzhiyun struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7512*4882a593Smuzhiyun int i;
7513*4882a593Smuzhiyun
7514*4882a593Smuzhiyun mutex_lock(&fs_devices->device_list_mutex);
7515*4882a593Smuzhiyun dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7516*4882a593Smuzhiyun true);
7517*4882a593Smuzhiyun mutex_unlock(&fs_devices->device_list_mutex);
7518*4882a593Smuzhiyun
7519*4882a593Smuzhiyun if (!dev) {
7520*4882a593Smuzhiyun btrfs_warn(fs_info, "get dev_stats failed, device not found");
7521*4882a593Smuzhiyun return -ENODEV;
7522*4882a593Smuzhiyun } else if (!dev->dev_stats_valid) {
7523*4882a593Smuzhiyun btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7524*4882a593Smuzhiyun return -ENODEV;
7525*4882a593Smuzhiyun } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7526*4882a593Smuzhiyun for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7527*4882a593Smuzhiyun if (stats->nr_items > i)
7528*4882a593Smuzhiyun stats->values[i] =
7529*4882a593Smuzhiyun btrfs_dev_stat_read_and_reset(dev, i);
7530*4882a593Smuzhiyun else
7531*4882a593Smuzhiyun btrfs_dev_stat_set(dev, i, 0);
7532*4882a593Smuzhiyun }
7533*4882a593Smuzhiyun btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7534*4882a593Smuzhiyun current->comm, task_pid_nr(current));
7535*4882a593Smuzhiyun } else {
7536*4882a593Smuzhiyun for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7537*4882a593Smuzhiyun if (stats->nr_items > i)
7538*4882a593Smuzhiyun stats->values[i] = btrfs_dev_stat_read(dev, i);
7539*4882a593Smuzhiyun }
7540*4882a593Smuzhiyun if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7541*4882a593Smuzhiyun stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7542*4882a593Smuzhiyun return 0;
7543*4882a593Smuzhiyun }
7544*4882a593Smuzhiyun
7545*4882a593Smuzhiyun /*
7546*4882a593Smuzhiyun * Update the size and bytes used for each device where it changed. This is
7547*4882a593Smuzhiyun * delayed since we would otherwise get errors while writing out the
7548*4882a593Smuzhiyun * superblocks.
7549*4882a593Smuzhiyun *
7550*4882a593Smuzhiyun * Must be invoked during transaction commit.
7551*4882a593Smuzhiyun */
7552*4882a593Smuzhiyun void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7553*4882a593Smuzhiyun {
7554*4882a593Smuzhiyun struct btrfs_device *curr, *next;
7555*4882a593Smuzhiyun
7556*4882a593Smuzhiyun ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7557*4882a593Smuzhiyun
7558*4882a593Smuzhiyun if (list_empty(&trans->dev_update_list))
7559*4882a593Smuzhiyun return;
7560*4882a593Smuzhiyun
7561*4882a593Smuzhiyun /*
7562*4882a593Smuzhiyun * We don't need the device_list_mutex here. This list is owned by the
7563*4882a593Smuzhiyun * transaction and the transaction must complete before the device is
7564*4882a593Smuzhiyun * released.
7565*4882a593Smuzhiyun */
7566*4882a593Smuzhiyun mutex_lock(&trans->fs_info->chunk_mutex);
7567*4882a593Smuzhiyun list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7568*4882a593Smuzhiyun post_commit_list) {
7569*4882a593Smuzhiyun list_del_init(&curr->post_commit_list);
7570*4882a593Smuzhiyun curr->commit_total_bytes = curr->disk_total_bytes;
7571*4882a593Smuzhiyun curr->commit_bytes_used = curr->bytes_used;
7572*4882a593Smuzhiyun }
7573*4882a593Smuzhiyun mutex_unlock(&trans->fs_info->chunk_mutex);
7574*4882a593Smuzhiyun }
7575*4882a593Smuzhiyun
7576*4882a593Smuzhiyun /*
7577*4882a593Smuzhiyun * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7578*4882a593Smuzhiyun */
7579*4882a593Smuzhiyun int btrfs_bg_type_to_factor(u64 flags)
7580*4882a593Smuzhiyun {
7581*4882a593Smuzhiyun const int index = btrfs_bg_flags_to_raid_index(flags);
7582*4882a593Smuzhiyun
7583*4882a593Smuzhiyun return btrfs_raid_array[index].ncopies;
7584*4882a593Smuzhiyun }
7585*4882a593Smuzhiyun
7586*4882a593Smuzhiyun
7587*4882a593Smuzhiyun
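/*
 * Cross-check one dev extent against the chunk mapping: the referenced chunk
 * must exist, the extent length must equal the chunk's stripe length, one of
 * the chunk's stripes must point at exactly this devid/physical offset, and
 * the extent must end within the device boundary (falling back to the real
 * seed device when the local entry is only a dummy).
 */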
7588*4882a593Smuzhiyun static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7589*4882a593Smuzhiyun u64 chunk_offset, u64 devid,
7590*4882a593Smuzhiyun u64 physical_offset, u64 physical_len)
7591*4882a593Smuzhiyun {
7592*4882a593Smuzhiyun struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7593*4882a593Smuzhiyun struct extent_map *em;
7594*4882a593Smuzhiyun struct map_lookup *map;
7595*4882a593Smuzhiyun struct btrfs_device *dev;
7596*4882a593Smuzhiyun u64 stripe_len;
7597*4882a593Smuzhiyun bool found = false;
7598*4882a593Smuzhiyun int ret = 0;
7599*4882a593Smuzhiyun int i;
7600*4882a593Smuzhiyun
7601*4882a593Smuzhiyun read_lock(&em_tree->lock);
7602*4882a593Smuzhiyun em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7603*4882a593Smuzhiyun read_unlock(&em_tree->lock);
7604*4882a593Smuzhiyun
7605*4882a593Smuzhiyun if (!em) {
7606*4882a593Smuzhiyun btrfs_err(fs_info,
7607*4882a593Smuzhiyun "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7608*4882a593Smuzhiyun physical_offset, devid);
7609*4882a593Smuzhiyun ret = -EUCLEAN;
7610*4882a593Smuzhiyun goto out;
7611*4882a593Smuzhiyun }
7612*4882a593Smuzhiyun
7613*4882a593Smuzhiyun map = em->map_lookup;
7614*4882a593Smuzhiyun stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7615*4882a593Smuzhiyun if (physical_len != stripe_len) {
7616*4882a593Smuzhiyun btrfs_err(fs_info,
7617*4882a593Smuzhiyun "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7618*4882a593Smuzhiyun physical_offset, devid, em->start, physical_len,
7619*4882a593Smuzhiyun stripe_len);
7620*4882a593Smuzhiyun ret = -EUCLEAN;
7621*4882a593Smuzhiyun goto out;
7622*4882a593Smuzhiyun }
7623*4882a593Smuzhiyun
7624*4882a593Smuzhiyun for (i = 0; i < map->num_stripes; i++) {
7625*4882a593Smuzhiyun if (map->stripes[i].dev->devid == devid &&
7626*4882a593Smuzhiyun map->stripes[i].physical == physical_offset) {
7627*4882a593Smuzhiyun found = true;
7628*4882a593Smuzhiyun if (map->verified_stripes >= map->num_stripes) {
7629*4882a593Smuzhiyun btrfs_err(fs_info,
7630*4882a593Smuzhiyun "too many dev extents for chunk %llu found",
7631*4882a593Smuzhiyun em->start);
7632*4882a593Smuzhiyun ret = -EUCLEAN;
7633*4882a593Smuzhiyun goto out;
7634*4882a593Smuzhiyun }
7635*4882a593Smuzhiyun map->verified_stripes++;
7636*4882a593Smuzhiyun break;
7637*4882a593Smuzhiyun }
7638*4882a593Smuzhiyun }
7639*4882a593Smuzhiyun if (!found) {
7640*4882a593Smuzhiyun btrfs_err(fs_info,
7641*4882a593Smuzhiyun "dev extent physical offset %llu devid %llu has no corresponding chunk",
7642*4882a593Smuzhiyun physical_offset, devid);
7643*4882a593Smuzhiyun ret = -EUCLEAN;
7644*4882a593Smuzhiyun }
7645*4882a593Smuzhiyun
7646*4882a593Smuzhiyun /* Make sure no dev extent is beyond device boundary */
7647*4882a593Smuzhiyun dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7648*4882a593Smuzhiyun if (!dev) {
7649*4882a593Smuzhiyun btrfs_err(fs_info, "failed to find devid %llu", devid);
7650*4882a593Smuzhiyun ret = -EUCLEAN;
7651*4882a593Smuzhiyun goto out;
7652*4882a593Smuzhiyun }
7653*4882a593Smuzhiyun
7654*4882a593Smuzhiyun /* It's possible this device is a dummy for a seed device */
7655*4882a593Smuzhiyun if (dev->disk_total_bytes == 0) {
7656*4882a593Smuzhiyun struct btrfs_fs_devices *devs;
7657*4882a593Smuzhiyun
7658*4882a593Smuzhiyun devs = list_first_entry(&fs_info->fs_devices->seed_list,
7659*4882a593Smuzhiyun struct btrfs_fs_devices, seed_list);
7660*4882a593Smuzhiyun dev = btrfs_find_device(devs, devid, NULL, NULL, false);
7661*4882a593Smuzhiyun if (!dev) {
7662*4882a593Smuzhiyun btrfs_err(fs_info, "failed to find seed devid %llu",
7663*4882a593Smuzhiyun devid);
7664*4882a593Smuzhiyun ret = -EUCLEAN;
7665*4882a593Smuzhiyun goto out;
7666*4882a593Smuzhiyun }
7667*4882a593Smuzhiyun }
7668*4882a593Smuzhiyun
7669*4882a593Smuzhiyun if (physical_offset + physical_len > dev->disk_total_bytes) {
7670*4882a593Smuzhiyun btrfs_err(fs_info,
7671*4882a593Smuzhiyun "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7672*4882a593Smuzhiyun devid, physical_offset, physical_len,
7673*4882a593Smuzhiyun dev->disk_total_bytes);
7674*4882a593Smuzhiyun ret = -EUCLEAN;
7675*4882a593Smuzhiyun goto out;
7676*4882a593Smuzhiyun }
7677*4882a593Smuzhiyun out:
7678*4882a593Smuzhiyun free_extent_map(em);
7679*4882a593Smuzhiyun return ret;
7680*4882a593Smuzhiyun }
7681*4882a593Smuzhiyun
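/* Verify that every chunk had a matching dev extent for each of its stripes. */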
7682*4882a593Smuzhiyun static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7683*4882a593Smuzhiyun {
7684*4882a593Smuzhiyun struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7685*4882a593Smuzhiyun struct extent_map *em;
7686*4882a593Smuzhiyun struct rb_node *node;
7687*4882a593Smuzhiyun int ret = 0;
7688*4882a593Smuzhiyun
7689*4882a593Smuzhiyun read_lock(&em_tree->lock);
7690*4882a593Smuzhiyun for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7691*4882a593Smuzhiyun em = rb_entry(node, struct extent_map, rb_node);
7692*4882a593Smuzhiyun if (em->map_lookup->num_stripes !=
7693*4882a593Smuzhiyun em->map_lookup->verified_stripes) {
7694*4882a593Smuzhiyun btrfs_err(fs_info,
7695*4882a593Smuzhiyun "chunk %llu has missing dev extent, have %d expect %d",
7696*4882a593Smuzhiyun em->start, em->map_lookup->verified_stripes,
7697*4882a593Smuzhiyun em->map_lookup->num_stripes);
7698*4882a593Smuzhiyun ret = -EUCLEAN;
7699*4882a593Smuzhiyun goto out;
7700*4882a593Smuzhiyun }
7701*4882a593Smuzhiyun }
7702*4882a593Smuzhiyun out:
7703*4882a593Smuzhiyun read_unlock(&em_tree->lock);
7704*4882a593Smuzhiyun return ret;
7705*4882a593Smuzhiyun }
7706*4882a593Smuzhiyun
7707*4882a593Smuzhiyun /*
7708*4882a593Smuzhiyun * Ensure that all dev extents are mapped to correct chunk, otherwise
7709*4882a593Smuzhiyun * later chunk allocation/free would cause unexpected behavior.
7710*4882a593Smuzhiyun *
7711*4882a593Smuzhiyun * NOTE: This will iterate through the whole device tree, which should be of
7712*4882a593Smuzhiyun * the same size level as the chunk tree. This slightly increases mount time.
7713*4882a593Smuzhiyun */
7714*4882a593Smuzhiyun int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7715*4882a593Smuzhiyun {
7716*4882a593Smuzhiyun struct btrfs_path *path;
7717*4882a593Smuzhiyun struct btrfs_root *root = fs_info->dev_root;
7718*4882a593Smuzhiyun struct btrfs_key key;
7719*4882a593Smuzhiyun u64 prev_devid = 0;
7720*4882a593Smuzhiyun u64 prev_dev_ext_end = 0;
7721*4882a593Smuzhiyun int ret = 0;
7722*4882a593Smuzhiyun
7723*4882a593Smuzhiyun key.objectid = 1;
7724*4882a593Smuzhiyun key.type = BTRFS_DEV_EXTENT_KEY;
7725*4882a593Smuzhiyun key.offset = 0;
7726*4882a593Smuzhiyun
7727*4882a593Smuzhiyun path = btrfs_alloc_path();
7728*4882a593Smuzhiyun if (!path)
7729*4882a593Smuzhiyun return -ENOMEM;
7730*4882a593Smuzhiyun
7731*4882a593Smuzhiyun path->reada = READA_FORWARD;
7732*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7733*4882a593Smuzhiyun if (ret < 0)
7734*4882a593Smuzhiyun goto out;
7735*4882a593Smuzhiyun
7736*4882a593Smuzhiyun if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7737*4882a593Smuzhiyun ret = btrfs_next_item(root, path);
7738*4882a593Smuzhiyun if (ret < 0)
7739*4882a593Smuzhiyun goto out;
7740*4882a593Smuzhiyun /* No dev extents at all? Not good */
7741*4882a593Smuzhiyun if (ret > 0) {
7742*4882a593Smuzhiyun ret = -EUCLEAN;
7743*4882a593Smuzhiyun goto out;
7744*4882a593Smuzhiyun }
7745*4882a593Smuzhiyun }
7746*4882a593Smuzhiyun while (1) {
7747*4882a593Smuzhiyun struct extent_buffer *leaf = path->nodes[0];
7748*4882a593Smuzhiyun struct btrfs_dev_extent *dext;
7749*4882a593Smuzhiyun int slot = path->slots[0];
7750*4882a593Smuzhiyun u64 chunk_offset;
7751*4882a593Smuzhiyun u64 physical_offset;
7752*4882a593Smuzhiyun u64 physical_len;
7753*4882a593Smuzhiyun u64 devid;
7754*4882a593Smuzhiyun
7755*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &key, slot);
7756*4882a593Smuzhiyun if (key.type != BTRFS_DEV_EXTENT_KEY)
7757*4882a593Smuzhiyun break;
7758*4882a593Smuzhiyun devid = key.objectid;
7759*4882a593Smuzhiyun physical_offset = key.offset;
7760*4882a593Smuzhiyun
7761*4882a593Smuzhiyun dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7762*4882a593Smuzhiyun chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7763*4882a593Smuzhiyun physical_len = btrfs_dev_extent_length(leaf, dext);
7764*4882a593Smuzhiyun
7765*4882a593Smuzhiyun /* Check if this dev extent overlaps with the previous one */
7766*4882a593Smuzhiyun if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7767*4882a593Smuzhiyun btrfs_err(fs_info,
7768*4882a593Smuzhiyun "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7769*4882a593Smuzhiyun devid, physical_offset, prev_dev_ext_end);
7770*4882a593Smuzhiyun ret = -EUCLEAN;
7771*4882a593Smuzhiyun goto out;
7772*4882a593Smuzhiyun }
7773*4882a593Smuzhiyun
7774*4882a593Smuzhiyun ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7775*4882a593Smuzhiyun physical_offset, physical_len);
7776*4882a593Smuzhiyun if (ret < 0)
7777*4882a593Smuzhiyun goto out;
7778*4882a593Smuzhiyun prev_devid = devid;
7779*4882a593Smuzhiyun prev_dev_ext_end = physical_offset + physical_len;
7780*4882a593Smuzhiyun
7781*4882a593Smuzhiyun ret = btrfs_next_item(root, path);
7782*4882a593Smuzhiyun if (ret < 0)
7783*4882a593Smuzhiyun goto out;
7784*4882a593Smuzhiyun if (ret > 0) {
7785*4882a593Smuzhiyun ret = 0;
7786*4882a593Smuzhiyun break;
7787*4882a593Smuzhiyun }
7788*4882a593Smuzhiyun }
7789*4882a593Smuzhiyun
7790*4882a593Smuzhiyun /* Ensure all chunks have corresponding dev extents */
7791*4882a593Smuzhiyun ret = verify_chunk_dev_extent_mapping(fs_info);
7792*4882a593Smuzhiyun out:
7793*4882a593Smuzhiyun btrfs_free_path(path);
7794*4882a593Smuzhiyun return ret;
7795*4882a593Smuzhiyun }
7796*4882a593Smuzhiyun
7797*4882a593Smuzhiyun /*
7798*4882a593Smuzhiyun * Check whether the given block group or device is pinned by any inode being
7799*4882a593Smuzhiyun * used as a swapfile.
7800*4882a593Smuzhiyun */
7801*4882a593Smuzhiyun bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7802*4882a593Smuzhiyun {
7803*4882a593Smuzhiyun struct btrfs_swapfile_pin *sp;
7804*4882a593Smuzhiyun struct rb_node *node;
7805*4882a593Smuzhiyun
7806*4882a593Smuzhiyun spin_lock(&fs_info->swapfile_pins_lock);
7807*4882a593Smuzhiyun node = fs_info->swapfile_pins.rb_node;
7808*4882a593Smuzhiyun while (node) {
7809*4882a593Smuzhiyun sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7810*4882a593Smuzhiyun if (ptr < sp->ptr)
7811*4882a593Smuzhiyun node = node->rb_left;
7812*4882a593Smuzhiyun else if (ptr > sp->ptr)
7813*4882a593Smuzhiyun node = node->rb_right;
7814*4882a593Smuzhiyun else
7815*4882a593Smuzhiyun break;
7816*4882a593Smuzhiyun }
7817*4882a593Smuzhiyun spin_unlock(&fs_info->swapfile_pins_lock);
7818*4882a593Smuzhiyun return node != NULL;
7819*4882a593Smuzhiyun }
7820