/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)

/*
 * The struct embedded in rdev is used to serialize IO.
 */
struct serial_in_rdev {
	struct rb_root_cached serial_rb;
	spinlock_t serial_lock;
	wait_queue_head_t serial_io_wait;
};

/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	new_data_offset;/* only relevant while reshaping */
	sector_t	sb_start;	/* offset of the super block (in 512-byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;		/* descriptor index in the superblock */
	int raid_disk;		/* role of device in array */
	int new_raid_disk;	/* role that the device will have in
				 * the array after a level-change completes.
				 */
	int saved_raid_disk;	/* role that device used to have in the
				 * array and could again if we did a partial
				 * resync from the bitmap
				 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	time64_t	last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */

	struct serial_in_rdev *serial;  /* used for raid1 io serialization */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state;	/* handle for 'state'
						 * sysfs entry */
	/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_unack_badblocks;
	/* handle for 'bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_badblocks;
	struct badblocks badblocks;

	struct {
		short offset;	/* Offset from superblock to start of PPL.
				 * Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	RemoveSynchronized,	/* synchronize_rcu() was called after
				 * this device was known to be faulty,
				 * so it is safe to remove without
				 * another synchronize_rcu() call.
				 */
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
	CollisionCheck,		/*
				 * check if there is collision between raid1
				 * serial bios.
				 */
};
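
/*
 * Illustrative sketch (assumed caller, not part of this header): rdev
 * state is read and updated with the regular bitops helpers on ->flags:
 *
 *	if (test_bit(Faulty, &rdev->flags))
 *		return;
 *	if (!test_and_set_bit(WantReplacement, &rdev->flags))
 *		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
 *
 * Per the state table in struct md_rdev above, Faulty and In_sync are
 * mutually exclusive, so testing a single bit is often sufficient.
 */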

static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
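/*
 * Illustrative sketch (assumed caller, not from this header): a read
 * path would typically probe the bad-block list before issuing IO to a
 * member device:
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, this_sector, sectors,
 *			&first_bad, &bad_sectors)) {
 *		... range overlaps a recorded bad block: read from
 *		    another mirror or fail the request ...
 *	}
 *
 * Note that 's' is array-relative: data_offset is added before the
 * check and subtracted from *first_bad on the way out.
 */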
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
struct md_cluster_info;

/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
	MD_CLOSING,		/* If set, we are closing the array, do not open
				 * it then */
	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
				   * already took resync lock, need to
				   * release the lock */
	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
				 * supported as calls to md_error() will
				 * never cause the array to become failed.
				 */
	MD_HAS_PPL,		/* The raid array has PPL feature set */
	MD_HAS_MULTIPLE_PPLS,	/* The raid array has multiple PPLs feature set */
	MD_ALLOW_SB_UPDATE,	/* md_check_recovery is allowed to update
				 * the metadata without taking reconfig_mutex.
				 */
	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
				 * without explicitly holding reconfig_mutex.
				 */
	MD_NOT_READY,		/* do_md_run() is active, so 'array_state'
				 * must not report that array is ready yet
				 */
	MD_BROKEN,		/* This is used in RAID-0/LINEAR only, to stop
				 * I/O in case an array member is gone/failed.
				 */
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};

#define NR_SERIAL_INFOS		8
/* record current range of serialize IOs */
struct serial_info {
	struct rb_node node;
	sector_t start;		/* start sector of rb node */
	sector_t last;		/* end sector of rb node */
	sector_t _subtree_last;	/* highest sector in subtree of rb node */
};

struct mddev {
	void				*private;
	struct md_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
	unsigned long			sb_flags;

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time64_t			ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;

	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;	/* handle for 'sync_action' */
	struct kernfs_node		*sysfs_completed;	/* handle for 'sync_completed' */
	struct kernfs_node		*sysfs_degraded;	/* handle for 'degraded' */
	struct kernfs_node		*sysfs_level;		/* handle for 'level' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	struct percpu_ref		writes_pending;
	int				sync_checkers;	/* # of threads checking writes_pending */
	struct request_queue		*queue;		/* for plugging ... */

	struct bitmap			*bitmap;	/* the bitmap for the device */
	struct {
		struct file		*file;	/* the bitmap file */
		loff_t			offset;	/* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space;	/* space available at this offset */
		loff_t			default_offset;	/* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space;	/* space available at
							 * default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep;	/* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes;	/* Maximum number of nodes in the cluster */
		char			cluster_name[64]; /* Name of the cluster */
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			bio_set;
	struct bio_set			sync_set; /* for sync operations like
						   * metadata and bitmap writes
						   */

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_PREFLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	ktime_t start_flush, last_flush; /* last_flush is when the last completed
					  * flush was started.
					  */
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	mempool_t *serial_info_pool;
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	unsigned int			good_device_nr;	/* good device num within cluster raid */
	unsigned int			noio_flag; /* for memalloc scope API */

	bool	has_superblocks:1;
	bool	fail_last_dev:1;
	bool	serialize_policy:1;
};

enum recovery_flags {
	/*
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
};

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
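
/*
 * Illustrative sketch (assumed caller, not from this header): the usual
 * pattern brackets reconfiguration with mddev_lock()/mddev_unlock(); a
 * non-zero return means the wait was interrupted and the caller must
 * bail out:
 *
 *	int err = mddev_lock(mddev);
 *
 *	if (err)
 *		return err;
 *	... reconfigure the array ...
 *	mddev_unlock(mddev);
 *
 * mddev_lock_nointr() is for paths that cannot tolerate -EINTR, and
 * mddev_trylock() for opportunistic callers.
 */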

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}

static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
}

struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up works that do NOT require md_thread.  tasks that
	 * require md_thread should go into start()
	 */
	int (*run)(struct mddev *mddev);
	/* start up works that require md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	void (*update_reshape_pos) (struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce) (struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
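
/*
 * Illustrative sketch (hypothetical module, not from this header): a
 * personality fills in its hooks and registers itself at module init,
 * keyed by its RAID level ("example" and the example_* functions below
 * are made-up names; the hooks shown are the ones a minimal
 * personality provides):
 *
 *	static struct md_personality example_personality = {
 *		.name		= "example",
 *		.level		= LEVEL_NONE,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.free		= example_free,
 *		.status		= example_status,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 * register_md_personality()/unregister_md_personality() are declared
 * later in this header.
 */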

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (struct mddev * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)				\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
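
/*
 * Illustrative sketch (assumed caller, not from this header): counting
 * the in-sync members with the plain iterator, and the RCU variant for
 * callers that cannot hold reconfig_mutex:
 *
 *	struct md_rdev *rdev;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 *
 *	rcu_read_lock();
 *	rdev_for_each_rcu(rdev, mddev)
 *		... read-only inspection only ...
 *	rcu_read_unlock();
 */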

struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int op, int op_flags,
			bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
				     bool is_suspend);
extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
				      bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
{
	int flags = rdev->bdev->bd_disk->flags;

	if (!(flags & GENHD_FL_UP)) {
		if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
			pr_warn("md: %s: %s array has a missing/failed member\n",
				mdname(rdev->mddev), md_type);
		return true;
	}
	return false;
}

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}

extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
	    !bio->bi_disk->queue->limits.max_write_same_sectors)
		mddev->queue->limits.max_write_same_sectors = 0;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
		mddev->queue->limits.max_write_zeroes_sectors = 0;
}

struct mdu_array_info_s;
struct mdu_disk_info_s;

extern int mdp_major;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);

extern const struct block_device_operations md_fops;

#endif /* _MD_MD_H */