// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
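
/*
 * Illustrative sketch (not part of this file): given an open file on a
 * block special device, the underlying block_device is reachable through
 * the mapping host inode, e.g.:
 *
 *	struct block_device *bdev = I_BDEV(file->f_mapping->host);
 */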

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
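
/*
 * Sketch of the intended split between the two helpers above (illustrative
 * only; kill_bdev() is static, so only this file can use it):
 *
 *	kill_bdev(bdev);	// drop everything, dirty pages included
 *	invalidate_bdev(bdev);	// drop only clean, unused pages
 *
 * kill_bdev() is the heavy hammer for cases where the cache must not
 * survive (e.g. a block size change); invalidate_bdev() is safe to call
 * on a live device.
 */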

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	struct block_device *claimed_bdev = NULL;
	int err;

	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		claimed_bdev = bdev->bd_contains;
		err = bd_prepare_to_claim(bdev, claimed_bdev,
					  truncate_bdev_range);
		if (err)
			goto invalidate;
	}
	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
EXPORT_SYMBOL(truncate_bdev_range);
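
/*
 * Illustrative sketch, assuming a discard-style caller: the page cache for
 * the byte range is dropped before the device operation so stale data
 * cannot be read back from the cache afterwards.
 *
 *	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 *	if (err)
 *		return err;
 *	err = blkdev_issue_discard(bdev, start >> 9, len >> 9,
 *				   GFP_KERNEL, 0);
 */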

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current size */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);
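
/*
 * Illustrative sketch (not part of this file): a caller that wants 4KiB
 * blocks would typically do
 *
 *	if (set_blocksize(bdev, 4096))
 *		return -EINVAL;
 *
 * Note that a successful call may discard the device's page cache via
 * kill_bdev(), so it belongs early in setup paths.
 */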

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL_NS(sb_set_blocksize, ANDROID_GKI_VFS_EXPORT_ONLY);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL_NS(sb_min_blocksize, ANDROID_GKI_VFS_EXPORT_ONLY);
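
/*
 * Illustrative mount-time usage (a sketch; the 1024 is an arbitrary
 * filesystem preference, not something this file mandates):
 *
 *	sb->s_blocksize = sb_min_blocksize(sb, 1024);
 *	if (!sb->s_blocksize)
 *		return -EINVAL;
 *
 * sb_min_blocksize() clamps the request up to the device's logical block
 * size, so the filesystem never ends up below what the hardware supports.
 */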

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}

static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> 9;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	bool			multi_bio : 1;
	bool			should_dirty : 1;
	bool			is_sync : 1;
	struct bio		bio;
};

static struct bio_set blkdev_dio_pool;

static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}

static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static ssize_t
__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	blk_qc_t qc = BLK_QC_T_NONE;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		bio_get(bio);
	} else {
		dio->iocb = iocb;
	}

	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);

	/*
	 * Don't plug for HIPRI/polled IO, as those should go straight
	 * to issue
	 */
	if (!is_poll)
		blk_start_plug(&plug);

	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
		if (!nr_pages) {
			bool polled = false;

			if (iocb->ki_flags & IOCB_HIPRI) {
				bio_set_polled(bio, iocb);
				polled = true;
			}

			qc = submit_bio(bio);

			if (polled)
				WRITE_ONCE(iocb->ki_cookie, qc);
			break;
		}

		if (!dio->multi_bio) {
			/*
			 * AIO needs an extra reference to ensure the dio
			 * structure which is embedded into the first bio
			 * stays around.
			 */
			if (!is_sync)
				bio_get(bio);
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
		} else {
			atomic_inc(&dio->ref);
		}

		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	if (!is_poll)
		blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;

		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	int nr_pages;

	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
	if (!nr_pages)
		return 0;
	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
}
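
/*
 * The split above means a small synchronous O_DIRECT request (at most
 * BIO_MAX_PAGES pages) takes the on-stack bio fast path, while larger or
 * asynchronous requests go through the blkdev_dio machinery. Illustrative
 * userspace trigger (a sketch, using the alignment the checks above demand):
 *
 *	int fd = open("/dev/sda", O_RDONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	pread(fd, buf, 4096, 0);	// eligible for the simple path
 */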

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
}
module_init(blkdev_init);

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);
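
/*
 * Illustrative sketch: __sync_blockdev() is the async/sync switch, so the
 * two common patterns are
 *
 *	__sync_blockdev(bdev, 0);	// start writeback, don't wait
 *	sync_blockdev(bdev);		// write out and wait for completion
 */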

/*
 * Write out and wait upon all dirty data associated with this
 * device: filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously. It counts up in
 * freeze_bdev() and counts down in thaw_bdev(). When it becomes 0,
 * thaw_bdev() actually unfreezes.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
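
/*
 * Illustrative snapshot sequence (a sketch, assuming a caller such as a
 * volume manager): freeze_bdev() and thaw_bdev() nest via
 * bd_fsfreeze_count, so each successful freeze must be paired with a thaw.
 *
 *	err = freeze_bdev(bdev);
 *	if (err)
 *		return err;
 *	take_snapshot(bdev);		// hypothetical helper
 *	err = thaw_bdev(bdev);
 */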

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
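
/*
 * Illustrative caller pattern (a sketch): because errors here are "soft",
 * users such as the swap code fall back to a regular bio when rw_page is
 * unavailable or refuses the request.
 *
 *	ret = bdev_read_page(bdev, sector, page);
 *	if (ret)
 *		submit_bio_for_page(page);	// hypothetical fallback path
 */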

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}

/*
 * pseudo-fs
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	bdev->bd_bdi = &noop_backing_dev_info;
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	/* Detach inode from wb early as bdi_put() may free bdi->wb */
	inode_detach_wb(inode);
	if (bdev->bd_bdi != &noop_backing_dev_info) {
		bdi_put(bdev->bd_bdi);
		bdev->bd_bdi = &noop_backing_dev_info;
	}
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely a _very_ bad one - but then it's hardly critical for a small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		spin_lock_init(&bdev->bd_size_lock);
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_part_count = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return bdev;
}

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);
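
/*
 * Illustrative lifetime rule (a sketch): every bdgrab() pins the backing
 * inode and must be balanced by a bdput().
 *
 *	struct block_device *ref = bdgrab(bdev);
 *	...use ref...
 *	bdput(ref);
 */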

struct block_device *bdget_part(struct hd_struct *part)
{
	return bdget(part_devt(part));
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev && !inode_unhashed(bdev->bd_inode)) {
		bdgrab(bdev);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	/*
	 * i_bdev references block device inode that was already shut down
	 * (corresponding device got removed).  Remove the reference and look
	 * up block device inode again just in case new device got
	 * reestablished under the same device number.
	 */
	if (bdev)
		bd_forget(inode);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			bdgrab(bdev);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&bdev_lock);

	if (bdev)
		bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
		void *holder)
{
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
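
/*
 * Illustrative claiming lifecycle (a sketch of how the three helpers in
 * this file fit together; the loop driver follows this shape):
 *
 *	err = bd_prepare_to_claim(bdev, whole, holder);
 *	if (err)
 *		return err;
 *	err = do_exclusive_setup(bdev);		// hypothetical
 *	if (err)
 *		bd_abort_claiming(bdev, whole, holder);
 *	else
 *		bd_finish_claiming(bdev, whole, holder);
 */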

static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
{
	struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);

	if (!disk)
		return NULL;
	/*
	 * Now that we hold gendisk reference we make sure bdev we looked up is
	 * not stale. If it is, it means device got removed and created before
	 * we looked up gendisk and we fail open in such case. Associating
	 * unhashed bdev with newly created gendisk could lead to two bdevs
	 * (and thus two independent caches) being associated with one device
	 * which is bad.
	 */
	if (inode_unhashed(bdev->bd_inode)) {
		put_disk_and_module(disk);
		return NULL;
	}
	return disk;
}

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @whole: whole block device
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
bd_finish_claiming(struct block_device * bdev,struct block_device * whole,void * holder)1145*4882a593Smuzhiyun static void bd_finish_claiming(struct block_device *bdev,
1146*4882a593Smuzhiyun 		struct block_device *whole, void *holder)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun 	spin_lock(&bdev_lock);
1149*4882a593Smuzhiyun 	BUG_ON(!bd_may_claim(bdev, whole, holder));
1150*4882a593Smuzhiyun 	/*
1151*4882a593Smuzhiyun 	 * Note that for a whole device bd_holders will be incremented twice,
1152*4882a593Smuzhiyun 	 * and bd_holder will be set to bd_may_claim before being set to holder
1153*4882a593Smuzhiyun 	 */
1154*4882a593Smuzhiyun 	whole->bd_holders++;
1155*4882a593Smuzhiyun 	whole->bd_holder = bd_may_claim;
1156*4882a593Smuzhiyun 	bdev->bd_holders++;
1157*4882a593Smuzhiyun 	bdev->bd_holder = holder;
1158*4882a593Smuzhiyun 	bd_clear_claiming(whole, holder);
1159*4882a593Smuzhiyun 	spin_unlock(&bdev_lock);
1160*4882a593Smuzhiyun }
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun /**
1163*4882a593Smuzhiyun  * bd_abort_claiming - abort claiming of a block device
1164*4882a593Smuzhiyun  * @bdev: block device of interest
1165*4882a593Smuzhiyun  * @whole: whole block device
1166*4882a593Smuzhiyun  * @holder: holder that has claimed @bdev
1167*4882a593Smuzhiyun  *
1168*4882a593Smuzhiyun  * Abort claiming of a block device when the exclusive open failed. This can
1169*4882a593Smuzhiyun  * also be used when exclusive open is not actually desired and we just needed
1170*4882a593Smuzhiyun  * to block other exclusive openers for a while.
1171*4882a593Smuzhiyun  */
bd_abort_claiming(struct block_device * bdev,struct block_device * whole,void * holder)1172*4882a593Smuzhiyun void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
1173*4882a593Smuzhiyun 		       void *holder)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	spin_lock(&bdev_lock);
1176*4882a593Smuzhiyun 	bd_clear_claiming(whole, holder);
1177*4882a593Smuzhiyun 	spin_unlock(&bdev_lock);
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun EXPORT_SYMBOL(bd_abort_claiming);
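
/*
 * Taken together, bd_prepare_to_claim(), bd_finish_claiming() and
 * bd_abort_claiming() form a claim lifecycle.  A hedged sketch of a
 * caller that only wants to block other exclusive openers for a while
 * ("my_holder" is a hypothetical cookie, typically a pointer owned by
 * the caller):
 *
 *	err = bd_prepare_to_claim(bdev, whole, my_holder);
 *	if (err)
 *		return err;	// -EBUSY: claimed by someone else
 *	// ... reconfigure the device; exclusive opens block meanwhile ...
 *	bd_abort_claiming(bdev, whole, my_holder);
 */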
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1182*4882a593Smuzhiyun struct bd_holder_disk {
1183*4882a593Smuzhiyun 	struct list_head	list;
1184*4882a593Smuzhiyun 	struct gendisk		*disk;
1185*4882a593Smuzhiyun 	int			refcnt;
1186*4882a593Smuzhiyun };
1187*4882a593Smuzhiyun 
bd_find_holder_disk(struct block_device * bdev,struct gendisk * disk)1188*4882a593Smuzhiyun static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
1189*4882a593Smuzhiyun 						  struct gendisk *disk)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct bd_holder_disk *holder;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
1194*4882a593Smuzhiyun 		if (holder->disk == disk)
1195*4882a593Smuzhiyun 			return holder;
1196*4882a593Smuzhiyun 	return NULL;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun 
add_symlink(struct kobject * from,struct kobject * to)1199*4882a593Smuzhiyun static int add_symlink(struct kobject *from, struct kobject *to)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun 	return sysfs_create_link(from, to, kobject_name(to));
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun 
del_symlink(struct kobject * from,struct kobject * to)1204*4882a593Smuzhiyun static void del_symlink(struct kobject *from, struct kobject *to)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	sysfs_remove_link(from, kobject_name(to));
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun /**
1210*4882a593Smuzhiyun  * bd_link_disk_holder - create symlinks between holding disk and slave bdev
1211*4882a593Smuzhiyun  * @bdev: the claimed slave bdev
1212*4882a593Smuzhiyun  * @disk: the holding disk
1213*4882a593Smuzhiyun  *
1214*4882a593Smuzhiyun  * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
1215*4882a593Smuzhiyun  *
1216*4882a593Smuzhiyun  * This function creates the following sysfs symlinks.
1217*4882a593Smuzhiyun  *
1218*4882a593Smuzhiyun  * - from "slaves" directory of the holder @disk to the claimed @bdev
1219*4882a593Smuzhiyun  * - from "holders" directory of the @bdev to the holder @disk
1220*4882a593Smuzhiyun  *
1221*4882a593Smuzhiyun  * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
1222*4882a593Smuzhiyun  * passed to bd_link_disk_holder(), then:
1223*4882a593Smuzhiyun  *
1224*4882a593Smuzhiyun  *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
1225*4882a593Smuzhiyun  *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
1226*4882a593Smuzhiyun  *
1227*4882a593Smuzhiyun  * The caller must have claimed @bdev before calling this function and
1228*4882a593Smuzhiyun  * ensure that both @bdev and @disk are valid during the creation and
1229*4882a593Smuzhiyun  * lifetime of these symlinks.
1230*4882a593Smuzhiyun  *
1231*4882a593Smuzhiyun  * CONTEXT:
1232*4882a593Smuzhiyun  * Might sleep.
1233*4882a593Smuzhiyun  *
1234*4882a593Smuzhiyun  * RETURNS:
1235*4882a593Smuzhiyun  * 0 on success, -errno on failure.
1236*4882a593Smuzhiyun  */
bd_link_disk_holder(struct block_device * bdev,struct gendisk * disk)1237*4882a593Smuzhiyun int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
1238*4882a593Smuzhiyun {
1239*4882a593Smuzhiyun 	struct bd_holder_disk *holder;
1240*4882a593Smuzhiyun 	int ret = 0;
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 	mutex_lock(&bdev->bd_mutex);
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	WARN_ON_ONCE(!bdev->bd_holder);
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	/* FIXME: remove the following once add_disk() handles errors */
1247*4882a593Smuzhiyun 	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
1248*4882a593Smuzhiyun 		goto out_unlock;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	holder = bd_find_holder_disk(bdev, disk);
1251*4882a593Smuzhiyun 	if (holder) {
1252*4882a593Smuzhiyun 		holder->refcnt++;
1253*4882a593Smuzhiyun 		goto out_unlock;
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
1257*4882a593Smuzhiyun 	if (!holder) {
1258*4882a593Smuzhiyun 		ret = -ENOMEM;
1259*4882a593Smuzhiyun 		goto out_unlock;
1260*4882a593Smuzhiyun 	}
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	INIT_LIST_HEAD(&holder->list);
1263*4882a593Smuzhiyun 	holder->disk = disk;
1264*4882a593Smuzhiyun 	holder->refcnt = 1;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1267*4882a593Smuzhiyun 	if (ret)
1268*4882a593Smuzhiyun 		goto out_free;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
1271*4882a593Smuzhiyun 	if (ret)
1272*4882a593Smuzhiyun 		goto out_del;
1273*4882a593Smuzhiyun 	/*
1274*4882a593Smuzhiyun 	 * bdev could be deleted beneath us which would implicitly destroy
1275*4882a593Smuzhiyun 	 * the holder directory.  Hold on to it.
1276*4882a593Smuzhiyun 	 */
1277*4882a593Smuzhiyun 	kobject_get(bdev->bd_part->holder_dir);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	list_add(&holder->list, &bdev->bd_holder_disks);
1280*4882a593Smuzhiyun 	goto out_unlock;
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun out_del:
1283*4882a593Smuzhiyun 	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1284*4882a593Smuzhiyun out_free:
1285*4882a593Smuzhiyun 	kfree(holder);
1286*4882a593Smuzhiyun out_unlock:
1287*4882a593Smuzhiyun 	mutex_unlock(&bdev->bd_mutex);
1288*4882a593Smuzhiyun 	return ret;
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bd_link_disk_holder);
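
/*
 * A stacking driver such as device-mapper pairs the call above with
 * bd_unlink_disk_holder() below, once per claimed component.  Hedged
 * sketch ("slave_bdev" and "stack_disk" are hypothetical names):
 *
 *	err = bd_link_disk_holder(slave_bdev, stack_disk);
 *	if (err)
 *		goto out_release;	// undo the claim taken earlier
 *	// ...
 *	bd_unlink_disk_holder(slave_bdev, stack_disk);	// on teardown
 */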
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun /**
1293*4882a593Smuzhiyun  * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
1294*4882a593Smuzhiyun  * @bdev: the claimed slave bdev
1295*4882a593Smuzhiyun  * @disk: the holding disk
1296*4882a593Smuzhiyun  *
1297*4882a593Smuzhiyun  * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
1298*4882a593Smuzhiyun  *
1299*4882a593Smuzhiyun  * CONTEXT:
1300*4882a593Smuzhiyun  * Might sleep.
1301*4882a593Smuzhiyun  */
bd_unlink_disk_holder(struct block_device * bdev,struct gendisk * disk)1302*4882a593Smuzhiyun void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	struct bd_holder_disk *holder;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	mutex_lock(&bdev->bd_mutex);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	holder = bd_find_holder_disk(bdev, disk);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
1311*4882a593Smuzhiyun 		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1312*4882a593Smuzhiyun 		del_symlink(bdev->bd_part->holder_dir,
1313*4882a593Smuzhiyun 			    &disk_to_dev(disk)->kobj);
1314*4882a593Smuzhiyun 		kobject_put(bdev->bd_part->holder_dir);
1315*4882a593Smuzhiyun 		list_del_init(&holder->list);
1316*4882a593Smuzhiyun 		kfree(holder);
1317*4882a593Smuzhiyun 	}
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 	mutex_unlock(&bdev->bd_mutex);
1320*4882a593Smuzhiyun }
1321*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
1322*4882a593Smuzhiyun #endif
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun /**
1325*4882a593Smuzhiyun  * check_disk_size_change - checks for disk size change and adjusts bdev size.
1326*4882a593Smuzhiyun  * @disk: struct gendisk to check
1327*4882a593Smuzhiyun  * @bdev: struct bdev to adjust.
1328*4882a593Smuzhiyun  * @verbose: if %true log a message about a size change if there is any
1329*4882a593Smuzhiyun  *
1330*4882a593Smuzhiyun  * This routine checks whether the bdev size matches the disk size and
1331*4882a593Smuzhiyun  * adjusts it if it differs. When the bdev size shrinks, all of its caches
1332*4882a593Smuzhiyun  * are freed.
1333*4882a593Smuzhiyun  */
check_disk_size_change(struct gendisk * disk,struct block_device * bdev,bool verbose)1334*4882a593Smuzhiyun static void check_disk_size_change(struct gendisk *disk,
1335*4882a593Smuzhiyun 		struct block_device *bdev, bool verbose)
1336*4882a593Smuzhiyun {
1337*4882a593Smuzhiyun 	loff_t disk_size, bdev_size;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	spin_lock(&bdev->bd_size_lock);
1340*4882a593Smuzhiyun 	disk_size = (loff_t)get_capacity(disk) << 9;
1341*4882a593Smuzhiyun 	bdev_size = i_size_read(bdev->bd_inode);
1342*4882a593Smuzhiyun 	if (disk_size != bdev_size) {
1343*4882a593Smuzhiyun 		if (verbose) {
1344*4882a593Smuzhiyun 			printk(KERN_INFO
1345*4882a593Smuzhiyun 			       "%s: detected capacity change from %lld to %lld\n",
1346*4882a593Smuzhiyun 			       disk->disk_name, bdev_size, disk_size);
1347*4882a593Smuzhiyun 		}
1348*4882a593Smuzhiyun 		i_size_write(bdev->bd_inode, disk_size);
1349*4882a593Smuzhiyun 	}
1350*4882a593Smuzhiyun 	spin_unlock(&bdev->bd_size_lock);
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	if (bdev_size > disk_size) {
1353*4882a593Smuzhiyun 		if (__invalidate_device(bdev, false))
1354*4882a593Smuzhiyun 			pr_warn("VFS: busy inodes on resized disk %s\n",
1355*4882a593Smuzhiyun 				disk->disk_name);
1356*4882a593Smuzhiyun 	}
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun /**
1360*4882a593Smuzhiyun  * revalidate_disk_size - checks for disk size change and adjusts bdev size.
1361*4882a593Smuzhiyun  * @disk: struct gendisk to check
1362*4882a593Smuzhiyun  * @verbose: if %true log a message about a size change if there is any
1363*4882a593Smuzhiyun  *
1364*4882a593Smuzhiyun  * This routine checks whether the bdev size matches the disk size and
1365*4882a593Smuzhiyun  * adjusts it if it differs. When the bdev size shrinks, all of its caches
1366*4882a593Smuzhiyun  * are freed.
1367*4882a593Smuzhiyun  */
revalidate_disk_size(struct gendisk * disk,bool verbose)1368*4882a593Smuzhiyun void revalidate_disk_size(struct gendisk *disk, bool verbose)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun 	struct block_device *bdev;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	/*
1373*4882a593Smuzhiyun 	 * Hidden disks don't have associated bdev so there's no point in
1374*4882a593Smuzhiyun 	 * revalidating them.
1375*4882a593Smuzhiyun 	 */
1376*4882a593Smuzhiyun 	if (disk->flags & GENHD_FL_HIDDEN)
1377*4882a593Smuzhiyun 		return;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	bdev = bdget_disk(disk, 0);
1380*4882a593Smuzhiyun 	if (bdev) {
1381*4882a593Smuzhiyun 		check_disk_size_change(disk, bdev, verbose);
1382*4882a593Smuzhiyun 		bdput(bdev);
1383*4882a593Smuzhiyun 	}
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun EXPORT_SYMBOL(revalidate_disk_size);
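
/*
 * A driver whose backing store was resized would typically update the
 * gendisk first and then let the helper above propagate the new size to
 * the bdev inode.  Illustrative only:
 *
 *	set_capacity(disk, new_sectors);	// new_sectors is hypothetical
 *	revalidate_disk_size(disk, true);	// logs the capacity change
 */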
1386*4882a593Smuzhiyun 
bd_set_nr_sectors(struct block_device * bdev,sector_t sectors)1387*4882a593Smuzhiyun void bd_set_nr_sectors(struct block_device *bdev, sector_t sectors)
1388*4882a593Smuzhiyun {
1389*4882a593Smuzhiyun 	spin_lock(&bdev->bd_size_lock);
1390*4882a593Smuzhiyun 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
1391*4882a593Smuzhiyun 	spin_unlock(&bdev->bd_size_lock);
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun EXPORT_SYMBOL(bd_set_nr_sectors);
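
/*
 * Note the unit conversion above: the bdev inode size is kept in bytes,
 * so a capacity of @sectors 512-byte units becomes
 * sectors << SECTOR_SHIFT bytes.  For example, for a 1 GiB device:
 *
 *	bd_set_nr_sectors(bdev, 2097152);	// 2097152 << 9 == 1 GiB
 */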
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
1396*4882a593Smuzhiyun 
bdev_disk_changed(struct block_device * bdev,bool invalidate)1397*4882a593Smuzhiyun int bdev_disk_changed(struct block_device *bdev, bool invalidate)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	struct gendisk *disk = bdev->bd_disk;
1400*4882a593Smuzhiyun 	int ret;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	lockdep_assert_held(&bdev->bd_mutex);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	if (!(disk->flags & GENHD_FL_UP))
1405*4882a593Smuzhiyun 		return -ENXIO;
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun rescan:
1408*4882a593Smuzhiyun 	ret = blk_drop_partitions(bdev);
1409*4882a593Smuzhiyun 	if (ret)
1410*4882a593Smuzhiyun 		return ret;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	clear_bit(GD_NEED_PART_SCAN, &disk->state);
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	/*
1415*4882a593Smuzhiyun 	 * Historically we only set the capacity to zero for devices that
1416*4882a593Smuzhiyun 	 * support partitions (independent of whether partitions were actually
1417*4882a593Smuzhiyun 	 * created).  Doing that is rather inconsistent, but changing it broke
1418*4882a593Smuzhiyun 	 * legacy udisks polling for ide-cdrom devices.  Use the crude check
1419*4882a593Smuzhiyun 	 * below to get sane behavior for most devices while not breaking
1420*4882a593Smuzhiyun 	 * userspace for this particular setup.
1421*4882a593Smuzhiyun 	 */
1422*4882a593Smuzhiyun 	if (invalidate) {
1423*4882a593Smuzhiyun 		if (disk_part_scan_enabled(disk) ||
1424*4882a593Smuzhiyun 		    !(disk->flags & GENHD_FL_REMOVABLE))
1425*4882a593Smuzhiyun 			set_capacity(disk, 0);
1426*4882a593Smuzhiyun 	} else {
1427*4882a593Smuzhiyun 		if (disk->fops->revalidate_disk)
1428*4882a593Smuzhiyun 			disk->fops->revalidate_disk(disk);
1429*4882a593Smuzhiyun 	}
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	check_disk_size_change(disk, bdev, !invalidate);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	if (get_capacity(disk)) {
1434*4882a593Smuzhiyun 		ret = blk_add_partitions(disk, bdev);
1435*4882a593Smuzhiyun 		if (ret == -EAGAIN)
1436*4882a593Smuzhiyun 			goto rescan;
1437*4882a593Smuzhiyun 	} else if (invalidate) {
1438*4882a593Smuzhiyun 		/*
1439*4882a593Smuzhiyun 		 * Tell userspace that the media / partition table may have
1440*4882a593Smuzhiyun 		 * changed.
1441*4882a593Smuzhiyun 		 */
1442*4882a593Smuzhiyun 		kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
1443*4882a593Smuzhiyun 	}
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	return ret;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun /*
1448*4882a593Smuzhiyun  * Only exported for the loop and dasd drivers for historic reasons.  Don't
1449*4882a593Smuzhiyun  * use in new code!
1450*4882a593Smuzhiyun  */
1451*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bdev_disk_changed);
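
/*
 * Hedged sketch of the rescan pattern the export expects -- the caller
 * must already hold bd_mutex, as asserted at the top of the function:
 *
 *	mutex_lock(&bdev->bd_mutex);
 *	ret = bdev_disk_changed(bdev, false);	// re-read partition table
 *	mutex_unlock(&bdev->bd_mutex);
 */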
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun /*
1454*4882a593Smuzhiyun  * bd_mutex locking:
1455*4882a593Smuzhiyun  *
1456*4882a593Smuzhiyun  *  mutex_lock(part->bd_mutex)
1457*4882a593Smuzhiyun  *    mutex_lock_nested(whole->bd_mutex, 1)
1458*4882a593Smuzhiyun  */
1459*4882a593Smuzhiyun 
__blkdev_get(struct block_device * bdev,fmode_t mode,void * holder,int for_part)1460*4882a593Smuzhiyun static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder,
1461*4882a593Smuzhiyun 		int for_part)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun 	struct block_device *whole = NULL, *claiming = NULL;
1464*4882a593Smuzhiyun 	struct gendisk *disk;
1465*4882a593Smuzhiyun 	int ret;
1466*4882a593Smuzhiyun 	int partno;
1467*4882a593Smuzhiyun 	bool first_open = false, unblock_events = true, need_restart;
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun  restart:
1470*4882a593Smuzhiyun 	need_restart = false;
1471*4882a593Smuzhiyun 	ret = -ENXIO;
1472*4882a593Smuzhiyun 	disk = bdev_get_gendisk(bdev, &partno);
1473*4882a593Smuzhiyun 	if (!disk)
1474*4882a593Smuzhiyun 		goto out;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	if (partno) {
1477*4882a593Smuzhiyun 		whole = bdget_disk(disk, 0);
1478*4882a593Smuzhiyun 		if (!whole) {
1479*4882a593Smuzhiyun 			ret = -ENOMEM;
1480*4882a593Smuzhiyun 			goto out_put_disk;
1481*4882a593Smuzhiyun 		}
1482*4882a593Smuzhiyun 	}
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	if (!for_part && (mode & FMODE_EXCL)) {
1485*4882a593Smuzhiyun 		WARN_ON_ONCE(!holder);
1486*4882a593Smuzhiyun 		if (whole)
1487*4882a593Smuzhiyun 			claiming = whole;
1488*4882a593Smuzhiyun 		else
1489*4882a593Smuzhiyun 			claiming = bdev;
1490*4882a593Smuzhiyun 		ret = bd_prepare_to_claim(bdev, claiming, holder);
1491*4882a593Smuzhiyun 		if (ret)
1492*4882a593Smuzhiyun 			goto out_put_whole;
1493*4882a593Smuzhiyun 	}
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	disk_block_events(disk);
1496*4882a593Smuzhiyun 	mutex_lock_nested(&bdev->bd_mutex, for_part);
1497*4882a593Smuzhiyun 	if (!bdev->bd_openers) {
1498*4882a593Smuzhiyun 		first_open = true;
1499*4882a593Smuzhiyun 		bdev->bd_disk = disk;
1500*4882a593Smuzhiyun 		bdev->bd_contains = bdev;
1501*4882a593Smuzhiyun 		bdev->bd_partno = partno;
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 		if (!partno) {
1504*4882a593Smuzhiyun 			ret = -ENXIO;
1505*4882a593Smuzhiyun 			bdev->bd_part = disk_get_part(disk, partno);
1506*4882a593Smuzhiyun 			if (!bdev->bd_part)
1507*4882a593Smuzhiyun 				goto out_clear;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 			ret = 0;
1510*4882a593Smuzhiyun 			if (disk->fops->open) {
1511*4882a593Smuzhiyun 				ret = disk->fops->open(bdev, mode);
1512*4882a593Smuzhiyun 				/*
1513*4882a593Smuzhiyun 				 * If we lost a race with 'disk' being deleted,
1514*4882a593Smuzhiyun 				 * try again.  See md.c
1515*4882a593Smuzhiyun 				 */
1516*4882a593Smuzhiyun 				if (ret == -ERESTARTSYS)
1517*4882a593Smuzhiyun 					need_restart = true;
1518*4882a593Smuzhiyun 			}
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 			if (!ret) {
1521*4882a593Smuzhiyun 				bd_set_nr_sectors(bdev, get_capacity(disk));
1522*4882a593Smuzhiyun 				set_init_blocksize(bdev);
1523*4882a593Smuzhiyun 			}
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 			/*
1526*4882a593Smuzhiyun 			 * If the device is invalidated, rescan partition
1527*4882a593Smuzhiyun 			 * if open succeeded or failed with -ENOMEDIUM.
1528*4882a593Smuzhiyun 			 * The latter is necessary to prevent ghost
1529*4882a593Smuzhiyun 			 * partitions on a removed medium.
1530*4882a593Smuzhiyun 			 */
1531*4882a593Smuzhiyun 			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
1532*4882a593Smuzhiyun 			    (!ret || ret == -ENOMEDIUM))
1533*4882a593Smuzhiyun 				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 			if (ret)
1536*4882a593Smuzhiyun 				goto out_clear;
1537*4882a593Smuzhiyun 		} else {
1538*4882a593Smuzhiyun 			BUG_ON(for_part);
1539*4882a593Smuzhiyun 			ret = __blkdev_get(whole, mode, NULL, 1);
1540*4882a593Smuzhiyun 			if (ret)
1541*4882a593Smuzhiyun 				goto out_clear;
1542*4882a593Smuzhiyun 			bdev->bd_contains = bdgrab(whole);
1543*4882a593Smuzhiyun 			bdev->bd_part = disk_get_part(disk, partno);
1544*4882a593Smuzhiyun 			if (!(disk->flags & GENHD_FL_UP) ||
1545*4882a593Smuzhiyun 			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
1546*4882a593Smuzhiyun 				ret = -ENXIO;
1547*4882a593Smuzhiyun 				goto out_clear;
1548*4882a593Smuzhiyun 			}
1549*4882a593Smuzhiyun 			bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects);
1550*4882a593Smuzhiyun 			set_init_blocksize(bdev);
1551*4882a593Smuzhiyun 		}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 		if (bdev->bd_bdi == &noop_backing_dev_info)
1554*4882a593Smuzhiyun 			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
1555*4882a593Smuzhiyun 	} else {
1556*4882a593Smuzhiyun 		if (bdev->bd_contains == bdev) {
1557*4882a593Smuzhiyun 			ret = 0;
1558*4882a593Smuzhiyun 			if (bdev->bd_disk->fops->open)
1559*4882a593Smuzhiyun 				ret = bdev->bd_disk->fops->open(bdev, mode);
1560*4882a593Smuzhiyun 			/* same as the first-opener case, read the comment there */
1561*4882a593Smuzhiyun 			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
1562*4882a593Smuzhiyun 			    (!ret || ret == -ENOMEDIUM))
1563*4882a593Smuzhiyun 				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
1564*4882a593Smuzhiyun 			if (ret)
1565*4882a593Smuzhiyun 				goto out_unlock_bdev;
1566*4882a593Smuzhiyun 		}
1567*4882a593Smuzhiyun 	}
1568*4882a593Smuzhiyun 	bdev->bd_openers++;
1569*4882a593Smuzhiyun 	if (for_part)
1570*4882a593Smuzhiyun 		bdev->bd_part_count++;
1571*4882a593Smuzhiyun 	if (claiming)
1572*4882a593Smuzhiyun 		bd_finish_claiming(bdev, claiming, holder);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	/*
1575*4882a593Smuzhiyun 	 * Block event polling for write claims if requested.  Any write holder
1576*4882a593Smuzhiyun 	 * makes the write_holder state stick until all are released.  This is
1577*4882a593Smuzhiyun 	 * good enough and tracking individual writeable references is too
1578*4882a593Smuzhiyun 	 * fragile given the way @mode is used in blkdev_get/put().
1579*4882a593Smuzhiyun 	 */
1580*4882a593Smuzhiyun 	if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1581*4882a593Smuzhiyun 	    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
1582*4882a593Smuzhiyun 		bdev->bd_write_holder = true;
1583*4882a593Smuzhiyun 		unblock_events = false;
1584*4882a593Smuzhiyun 	}
1585*4882a593Smuzhiyun 	mutex_unlock(&bdev->bd_mutex);
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	if (unblock_events)
1588*4882a593Smuzhiyun 		disk_unblock_events(disk);
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	/* only one opener holds refs to the module and disk */
1591*4882a593Smuzhiyun 	if (!first_open)
1592*4882a593Smuzhiyun 		put_disk_and_module(disk);
1593*4882a593Smuzhiyun 	if (whole)
1594*4882a593Smuzhiyun 		bdput(whole);
1595*4882a593Smuzhiyun 	return 0;
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun  out_clear:
1598*4882a593Smuzhiyun 	disk_put_part(bdev->bd_part);
1599*4882a593Smuzhiyun 	bdev->bd_disk = NULL;
1600*4882a593Smuzhiyun 	bdev->bd_part = NULL;
1601*4882a593Smuzhiyun 	if (bdev != bdev->bd_contains)
1602*4882a593Smuzhiyun 		__blkdev_put(bdev->bd_contains, mode, 1);
1603*4882a593Smuzhiyun 	bdev->bd_contains = NULL;
1604*4882a593Smuzhiyun  out_unlock_bdev:
1605*4882a593Smuzhiyun 	if (claiming)
1606*4882a593Smuzhiyun 		bd_abort_claiming(bdev, claiming, holder);
1607*4882a593Smuzhiyun 	mutex_unlock(&bdev->bd_mutex);
1608*4882a593Smuzhiyun 	disk_unblock_events(disk);
1609*4882a593Smuzhiyun  out_put_whole:
1610*4882a593Smuzhiyun 	if (whole)
1611*4882a593Smuzhiyun 		bdput(whole);
1612*4882a593Smuzhiyun  out_put_disk:
1613*4882a593Smuzhiyun 	put_disk_and_module(disk);
1614*4882a593Smuzhiyun 	if (need_restart)
1615*4882a593Smuzhiyun 		goto restart;
1616*4882a593Smuzhiyun  out:
1617*4882a593Smuzhiyun 	return ret;
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun /**
1621*4882a593Smuzhiyun  * blkdev_get - open a block device
1622*4882a593Smuzhiyun  * @bdev: block_device to open
1623*4882a593Smuzhiyun  * @mode: FMODE_* mask
1624*4882a593Smuzhiyun  * @holder: exclusive holder identifier
1625*4882a593Smuzhiyun  *
1626*4882a593Smuzhiyun  * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is
1627*4882a593Smuzhiyun  * open with exclusive access.  Specifying %FMODE_EXCL with %NULL
1628*4882a593Smuzhiyun  * @holder is invalid.  Exclusive opens may nest for the same @holder.
1629*4882a593Smuzhiyun  *
1630*4882a593Smuzhiyun  * On success, the reference count of @bdev is unchanged.  On failure,
1631*4882a593Smuzhiyun  * @bdev is put.
1632*4882a593Smuzhiyun  *
1633*4882a593Smuzhiyun  * CONTEXT:
1634*4882a593Smuzhiyun  * Might sleep.
1635*4882a593Smuzhiyun  *
1636*4882a593Smuzhiyun  * RETURNS:
1637*4882a593Smuzhiyun  * 0 on success, -errno on failure.
1638*4882a593Smuzhiyun  */
blkdev_get(struct block_device * bdev,fmode_t mode,void * holder)1639*4882a593Smuzhiyun static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1640*4882a593Smuzhiyun {
1641*4882a593Smuzhiyun 	int ret, perm = 0;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	if (mode & FMODE_READ)
1644*4882a593Smuzhiyun 		perm |= MAY_READ;
1645*4882a593Smuzhiyun 	if (mode & FMODE_WRITE)
1646*4882a593Smuzhiyun 		perm |= MAY_WRITE;
1647*4882a593Smuzhiyun 	ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1648*4882a593Smuzhiyun 	if (ret)
1649*4882a593Smuzhiyun 		goto bdput;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	ret = __blkdev_get(bdev, mode, holder, 0);
1652*4882a593Smuzhiyun 	if (ret)
1653*4882a593Smuzhiyun 		goto bdput;
1654*4882a593Smuzhiyun 	return 0;
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun bdput:
1657*4882a593Smuzhiyun 	bdput(bdev);
1658*4882a593Smuzhiyun 	return ret;
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun /**
1662*4882a593Smuzhiyun  * blkdev_get_by_path - open a block device by name
1663*4882a593Smuzhiyun  * @path: path to the block device to open
1664*4882a593Smuzhiyun  * @mode: FMODE_* mask
1665*4882a593Smuzhiyun  * @holder: exclusive holder identifier
1666*4882a593Smuzhiyun  *
1667*4882a593Smuzhiyun  * Open the blockdevice described by the device file at @path.  @mode
1668*4882a593Smuzhiyun  * and @holder are identical to blkdev_get().
1669*4882a593Smuzhiyun  *
1670*4882a593Smuzhiyun  * On success, the returned block_device has reference count of one.
1671*4882a593Smuzhiyun  *
1672*4882a593Smuzhiyun  * CONTEXT:
1673*4882a593Smuzhiyun  * Might sleep.
1674*4882a593Smuzhiyun  *
1675*4882a593Smuzhiyun  * RETURNS:
1676*4882a593Smuzhiyun  * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1677*4882a593Smuzhiyun  */
blkdev_get_by_path(const char * path,fmode_t mode,void * holder)1678*4882a593Smuzhiyun struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1679*4882a593Smuzhiyun 					void *holder)
1680*4882a593Smuzhiyun {
1681*4882a593Smuzhiyun 	struct block_device *bdev;
1682*4882a593Smuzhiyun 	int err;
1683*4882a593Smuzhiyun 
1684*4882a593Smuzhiyun 	bdev = lookup_bdev(path);
1685*4882a593Smuzhiyun 	if (IS_ERR(bdev))
1686*4882a593Smuzhiyun 		return bdev;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 	err = blkdev_get(bdev, mode, holder);
1689*4882a593Smuzhiyun 	if (err)
1690*4882a593Smuzhiyun 		return ERR_PTR(err);
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1693*4882a593Smuzhiyun 		blkdev_put(bdev, mode);
1694*4882a593Smuzhiyun 		return ERR_PTR(-EACCES);
1695*4882a593Smuzhiyun 	}
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	return bdev;
1698*4882a593Smuzhiyun }
1699*4882a593Smuzhiyun EXPORT_SYMBOL(blkdev_get_by_path);
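
/*
 * Typical exclusive-open usage, in the style of filesystem mount code;
 * the path and the "fs_type" holder cookie are illustrative.  Passing a
 * stable @holder lets nested exclusive opens by the same owner succeed,
 * and the mode passed to blkdev_put() must match the one used here:
 *
 *	bdev = blkdev_get_by_path("/dev/sda1",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				  fs_type);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	// ...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */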
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun /**
1702*4882a593Smuzhiyun  * blkdev_get_by_dev - open a block device by device number
1703*4882a593Smuzhiyun  * @dev: device number of block device to open
1704*4882a593Smuzhiyun  * @mode: FMODE_* mask
1705*4882a593Smuzhiyun  * @holder: exclusive holder identifier
1706*4882a593Smuzhiyun  *
1707*4882a593Smuzhiyun  * Open the blockdevice described by device number @dev.  @mode and
1708*4882a593Smuzhiyun  * @holder are identical to blkdev_get().
1709*4882a593Smuzhiyun  *
1710*4882a593Smuzhiyun  * Use it ONLY if you really do not have anything better - i.e. when
1711*4882a593Smuzhiyun  * you are behind a truly sucky interface and all you are given is a
1712*4882a593Smuzhiyun  * device number.  _Never_ to be used for internal purposes.  If you
1713*4882a593Smuzhiyun  * ever need it - reconsider your API.
1714*4882a593Smuzhiyun  *
1715*4882a593Smuzhiyun  * On success, the returned block_device has reference count of one.
1716*4882a593Smuzhiyun  *
1717*4882a593Smuzhiyun  * CONTEXT:
1718*4882a593Smuzhiyun  * Might sleep.
1719*4882a593Smuzhiyun  *
1720*4882a593Smuzhiyun  * RETURNS:
1721*4882a593Smuzhiyun  * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1722*4882a593Smuzhiyun  */
blkdev_get_by_dev(dev_t dev,fmode_t mode,void * holder)1723*4882a593Smuzhiyun struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
1724*4882a593Smuzhiyun {
1725*4882a593Smuzhiyun 	struct block_device *bdev;
1726*4882a593Smuzhiyun 	int err;
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	bdev = bdget(dev);
1729*4882a593Smuzhiyun 	if (!bdev)
1730*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	err = blkdev_get(bdev, mode, holder);
1733*4882a593Smuzhiyun 	if (err)
1734*4882a593Smuzhiyun 		return ERR_PTR(err);
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	return bdev;
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun EXPORT_SYMBOL(blkdev_get_by_dev);
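
/*
 * Illustrative use of the dev_t variant for code that really only has a
 * device number ("dev" is assumed to come from elsewhere):
 *
 *	bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	// ...
 *	blkdev_put(bdev, FMODE_READ);
 */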
1739*4882a593Smuzhiyun 
blkdev_open(struct inode * inode,struct file * filp)1740*4882a593Smuzhiyun static int blkdev_open(struct inode * inode, struct file * filp)
1741*4882a593Smuzhiyun {
1742*4882a593Smuzhiyun 	struct block_device *bdev;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	/*
1745*4882a593Smuzhiyun 	 * Preserve backwards compatibility and allow large file access
1746*4882a593Smuzhiyun 	 * even if userspace doesn't ask for it explicitly. Some mkfs
1747*4882a593Smuzhiyun 	 * binaries need it. We might want to drop this workaround
1748*4882a593Smuzhiyun 	 * during an unstable branch.
1749*4882a593Smuzhiyun 	 */
1750*4882a593Smuzhiyun 	filp->f_flags |= O_LARGEFILE;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	if (filp->f_flags & O_NDELAY)
1755*4882a593Smuzhiyun 		filp->f_mode |= FMODE_NDELAY;
1756*4882a593Smuzhiyun 	if (filp->f_flags & O_EXCL)
1757*4882a593Smuzhiyun 		filp->f_mode |= FMODE_EXCL;
1758*4882a593Smuzhiyun 	if ((filp->f_flags & O_ACCMODE) == 3)
1759*4882a593Smuzhiyun 		filp->f_mode |= FMODE_WRITE_IOCTL;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	bdev = bd_acquire(inode);
1762*4882a593Smuzhiyun 	if (bdev == NULL)
1763*4882a593Smuzhiyun 		return -ENOMEM;
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	filp->f_mapping = bdev->bd_inode->i_mapping;
1766*4882a593Smuzhiyun 	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	return blkdev_get(bdev, filp->f_mode, filp);
1769*4882a593Smuzhiyun }
1770*4882a593Smuzhiyun 
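/*
 * The (f_flags & O_ACCMODE) == 3 case above is a long-standing quirk:
 * opening with both O_WRONLY and O_RDWR set yields a descriptor that
 * cannot read() or write() but may issue ioctls gated on
 * FMODE_WRITE_IOCTL.  From user space this would look like (illustrative;
 * the flag value 3 has no O_* name):
 *
 *	int fd = open("/dev/sda", 3);	// ioctl-only access mode
 */
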
__blkdev_put(struct block_device * bdev,fmode_t mode,int for_part)1771*4882a593Smuzhiyun static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1772*4882a593Smuzhiyun {
1773*4882a593Smuzhiyun 	struct gendisk *disk = bdev->bd_disk;
1774*4882a593Smuzhiyun 	struct block_device *victim = NULL;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	/*
1777*4882a593Smuzhiyun 	 * Sync early if it looks like we're the last one.  If someone else
1778*4882a593Smuzhiyun 	 * opens the block device between now and the decrement of bd_openers
1779*4882a593Smuzhiyun 	 * then we did a sync that we didn't need to, but that's not the end
1780*4882a593Smuzhiyun 	 * of the world and we want to avoid long (possibly several-minute)
1781*4882a593Smuzhiyun 	 * syncs while holding the mutex.
1782*4882a593Smuzhiyun 	 */
1783*4882a593Smuzhiyun 	if (bdev->bd_openers == 1)
1784*4882a593Smuzhiyun 		sync_blockdev(bdev);
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	mutex_lock_nested(&bdev->bd_mutex, for_part);
1787*4882a593Smuzhiyun 	if (for_part)
1788*4882a593Smuzhiyun 		bdev->bd_part_count--;
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	if (!--bdev->bd_openers) {
1791*4882a593Smuzhiyun 		WARN_ON_ONCE(bdev->bd_holders);
1792*4882a593Smuzhiyun 		sync_blockdev(bdev);
1793*4882a593Smuzhiyun 		kill_bdev(bdev);
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 		bdev_write_inode(bdev);
1796*4882a593Smuzhiyun 	}
1797*4882a593Smuzhiyun 	if (bdev->bd_contains == bdev) {
1798*4882a593Smuzhiyun 		if (disk->fops->release)
1799*4882a593Smuzhiyun 			disk->fops->release(disk, mode);
1800*4882a593Smuzhiyun 	}
1801*4882a593Smuzhiyun 	if (!bdev->bd_openers) {
1802*4882a593Smuzhiyun 		disk_put_part(bdev->bd_part);
1803*4882a593Smuzhiyun 		bdev->bd_part = NULL;
1804*4882a593Smuzhiyun 		bdev->bd_disk = NULL;
1805*4882a593Smuzhiyun 		if (bdev != bdev->bd_contains)
1806*4882a593Smuzhiyun 			victim = bdev->bd_contains;
1807*4882a593Smuzhiyun 		bdev->bd_contains = NULL;
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 		put_disk_and_module(disk);
1810*4882a593Smuzhiyun 	}
1811*4882a593Smuzhiyun 	mutex_unlock(&bdev->bd_mutex);
1812*4882a593Smuzhiyun 	bdput(bdev);
1813*4882a593Smuzhiyun 	if (victim)
1814*4882a593Smuzhiyun 		__blkdev_put(victim, mode, 1);
1815*4882a593Smuzhiyun }
1816*4882a593Smuzhiyun 
blkdev_put(struct block_device * bdev,fmode_t mode)1817*4882a593Smuzhiyun void blkdev_put(struct block_device *bdev, fmode_t mode)
1818*4882a593Smuzhiyun {
1819*4882a593Smuzhiyun 	mutex_lock(&bdev->bd_mutex);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	if (mode & FMODE_EXCL) {
1822*4882a593Smuzhiyun 		bool bdev_free;
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 		/*
1825*4882a593Smuzhiyun 		 * Release a claim on the device.  The holder fields
1826*4882a593Smuzhiyun 		 * are protected with bdev_lock.  bd_mutex is to
1827*4882a593Smuzhiyun 		 * synchronize disk_holder unlinking.
1828*4882a593Smuzhiyun 		 */
1829*4882a593Smuzhiyun 		spin_lock(&bdev_lock);
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 		WARN_ON_ONCE(--bdev->bd_holders < 0);
1832*4882a593Smuzhiyun 		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun 		/* bd_contains might point to self, check in a separate step */
1835*4882a593Smuzhiyun 		if ((bdev_free = !bdev->bd_holders))
1836*4882a593Smuzhiyun 			bdev->bd_holder = NULL;
1837*4882a593Smuzhiyun 		if (!bdev->bd_contains->bd_holders)
1838*4882a593Smuzhiyun 			bdev->bd_contains->bd_holder = NULL;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 		spin_unlock(&bdev_lock);
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun 		/*
1843*4882a593Smuzhiyun 		 * If this was the last claim, remove holder link and
1844*4882a593Smuzhiyun 		 * unblock event polling if it was a write holder.
1845*4882a593Smuzhiyun 		 */
1846*4882a593Smuzhiyun 		if (bdev_free && bdev->bd_write_holder) {
1847*4882a593Smuzhiyun 			disk_unblock_events(bdev->bd_disk);
1848*4882a593Smuzhiyun 			bdev->bd_write_holder = false;
1849*4882a593Smuzhiyun 		}
1850*4882a593Smuzhiyun 	}
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	/*
1853*4882a593Smuzhiyun 	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
1854*4882a593Smuzhiyun 	 * event.  This is to ensure detection of media removal commanded
1855*4882a593Smuzhiyun 	 * from userland - e.g. eject(1).
1856*4882a593Smuzhiyun 	 */
1857*4882a593Smuzhiyun 	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	mutex_unlock(&bdev->bd_mutex);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	__blkdev_put(bdev, mode, 0);
1862*4882a593Smuzhiyun }
1863*4882a593Smuzhiyun EXPORT_SYMBOL(blkdev_put);
1864*4882a593Smuzhiyun 
blkdev_close(struct inode * inode,struct file * filp)1865*4882a593Smuzhiyun static int blkdev_close(struct inode * inode, struct file * filp)
1866*4882a593Smuzhiyun {
1867*4882a593Smuzhiyun 	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
1868*4882a593Smuzhiyun 	blkdev_put(bdev, filp->f_mode);
1869*4882a593Smuzhiyun 	return 0;
1870*4882a593Smuzhiyun }
1871*4882a593Smuzhiyun 
block_ioctl(struct file * file,unsigned cmd,unsigned long arg)1872*4882a593Smuzhiyun static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1873*4882a593Smuzhiyun {
1874*4882a593Smuzhiyun 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
1875*4882a593Smuzhiyun 	fmode_t mode = file->f_mode;
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	/*
1878*4882a593Smuzhiyun 	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
1879*4882a593Smuzhiyun 	 * to update it before every ioctl.
1880*4882a593Smuzhiyun 	 */
1881*4882a593Smuzhiyun 	if (file->f_flags & O_NDELAY)
1882*4882a593Smuzhiyun 		mode |= FMODE_NDELAY;
1883*4882a593Smuzhiyun 	else
1884*4882a593Smuzhiyun 		mode &= ~FMODE_NDELAY;
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	return blkdev_ioctl(bdev, mode, cmd, arg);
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun /*
1890*4882a593Smuzhiyun  * Write data to the block device.  Only intended for the block device itself
1891*4882a593Smuzhiyun  * and the raw driver, which is basically a fake block device.
1892*4882a593Smuzhiyun  *
1893*4882a593Smuzhiyun  * Does not take i_mutex for the write and thus is not for general purpose
1894*4882a593Smuzhiyun  * use.
1895*4882a593Smuzhiyun  */
blkdev_write_iter(struct kiocb * iocb,struct iov_iter * from)1896*4882a593Smuzhiyun ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
1899*4882a593Smuzhiyun 	struct inode *bd_inode = bdev_file_inode(file);
1900*4882a593Smuzhiyun 	loff_t size = i_size_read(bd_inode);
1901*4882a593Smuzhiyun 	struct blk_plug plug;
1902*4882a593Smuzhiyun 	size_t shorted = 0;
1903*4882a593Smuzhiyun 	ssize_t ret;
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	if (bdev_read_only(I_BDEV(bd_inode)))
1906*4882a593Smuzhiyun 		return -EPERM;
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
1909*4882a593Smuzhiyun 		return -ETXTBSY;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	if (!iov_iter_count(from))
1912*4882a593Smuzhiyun 		return 0;
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	if (iocb->ki_pos >= size)
1915*4882a593Smuzhiyun 		return -ENOSPC;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
1918*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	size -= iocb->ki_pos;
1921*4882a593Smuzhiyun 	if (iov_iter_count(from) > size) {
1922*4882a593Smuzhiyun 		shorted = iov_iter_count(from) - size;
1923*4882a593Smuzhiyun 		iov_iter_truncate(from, size);
1924*4882a593Smuzhiyun 	}
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	blk_start_plug(&plug);
1927*4882a593Smuzhiyun 	ret = __generic_file_write_iter(iocb, from);
1928*4882a593Smuzhiyun 	if (ret > 0)
1929*4882a593Smuzhiyun 		ret = generic_write_sync(iocb, ret);
1930*4882a593Smuzhiyun 	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
1931*4882a593Smuzhiyun 	blk_finish_plug(&plug);
1932*4882a593Smuzhiyun 	return ret;
1933*4882a593Smuzhiyun }
1934*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blkdev_write_iter);
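
/*
 * The truncate/reexpand dance above means a write that straddles the end
 * of the device is shortened rather than failed: a write starting below
 * the device size returns the number of bytes that fit, while a write
 * starting at or past it fails with -ENOSPC.  Hypothetical numbers for a
 * 1 MiB device:
 *
 *	pwrite(fd, buf, 8192, 1048576 - 4096)	returns 4096
 *	pwrite(fd, buf, 8192, 1048576)		fails with ENOSPC
 */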
1935*4882a593Smuzhiyun 
blkdev_read_iter(struct kiocb * iocb,struct iov_iter * to)1936*4882a593Smuzhiyun ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
1937*4882a593Smuzhiyun {
1938*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
1939*4882a593Smuzhiyun 	struct inode *bd_inode = bdev_file_inode(file);
1940*4882a593Smuzhiyun 	loff_t size = i_size_read(bd_inode);
1941*4882a593Smuzhiyun 	loff_t pos = iocb->ki_pos;
1942*4882a593Smuzhiyun 	size_t shorted = 0;
1943*4882a593Smuzhiyun 	ssize_t ret;
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun 	if (pos >= size)
1946*4882a593Smuzhiyun 		return 0;
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	size -= pos;
1949*4882a593Smuzhiyun 	if (iov_iter_count(to) > size) {
1950*4882a593Smuzhiyun 		shorted = iov_iter_count(to) - size;
1951*4882a593Smuzhiyun 		iov_iter_truncate(to, size);
1952*4882a593Smuzhiyun 	}
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	ret = generic_file_read_iter(iocb, to);
1955*4882a593Smuzhiyun 	iov_iter_reexpand(to, iov_iter_count(to) + shorted);
1956*4882a593Smuzhiyun 	return ret;
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blkdev_read_iter);
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun /*
1961*4882a593Smuzhiyun  * Try to release a page associated with a block device when the system
1962*4882a593Smuzhiyun  * is under memory pressure.
1963*4882a593Smuzhiyun  */
blkdev_releasepage(struct page * page,gfp_t wait)1964*4882a593Smuzhiyun static int blkdev_releasepage(struct page *page, gfp_t wait)
1965*4882a593Smuzhiyun {
1966*4882a593Smuzhiyun 	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	if (super && super->s_op->bdev_try_to_free_page)
1969*4882a593Smuzhiyun 		return super->s_op->bdev_try_to_free_page(super, page, wait);
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	return try_to_free_buffers(page);
1972*4882a593Smuzhiyun }
1973*4882a593Smuzhiyun 
blkdev_writepages(struct address_space * mapping,struct writeback_control * wbc)1974*4882a593Smuzhiyun static int blkdev_writepages(struct address_space *mapping,
1975*4882a593Smuzhiyun 			     struct writeback_control *wbc)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun 	return generic_writepages(mapping, wbc);
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun static const struct address_space_operations def_blk_aops = {
1981*4882a593Smuzhiyun 	.readpage	= blkdev_readpage,
1982*4882a593Smuzhiyun 	.readahead	= blkdev_readahead,
1983*4882a593Smuzhiyun 	.writepage	= blkdev_writepage,
1984*4882a593Smuzhiyun 	.write_begin	= blkdev_write_begin,
1985*4882a593Smuzhiyun 	.write_end	= blkdev_write_end,
1986*4882a593Smuzhiyun 	.writepages	= blkdev_writepages,
1987*4882a593Smuzhiyun 	.releasepage	= blkdev_releasepage,
1988*4882a593Smuzhiyun 	.direct_IO	= blkdev_direct_IO,
1989*4882a593Smuzhiyun 	.migratepage	= buffer_migrate_page_norefs,
1990*4882a593Smuzhiyun 	.is_dirty_writeback = buffer_check_dirty_writeback,
1991*4882a593Smuzhiyun };
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun #define	BLKDEV_FALLOC_FL_SUPPORTED					\
1994*4882a593Smuzhiyun 		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
1995*4882a593Smuzhiyun 		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
1996*4882a593Smuzhiyun 
blkdev_fallocate(struct file * file,int mode,loff_t start,loff_t len)1997*4882a593Smuzhiyun static long blkdev_fallocate(struct file *file, int mode, loff_t start,
1998*4882a593Smuzhiyun 			     loff_t len)
1999*4882a593Smuzhiyun {
2000*4882a593Smuzhiyun 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
2001*4882a593Smuzhiyun 	loff_t end = start + len - 1;
2002*4882a593Smuzhiyun 	loff_t isize;
2003*4882a593Smuzhiyun 	int error;
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 	/* Fail if we don't recognize the flags. */
2006*4882a593Smuzhiyun 	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
2007*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	/* Don't go off the end of the device. */
2010*4882a593Smuzhiyun 	isize = i_size_read(bdev->bd_inode);
2011*4882a593Smuzhiyun 	if (start >= isize)
2012*4882a593Smuzhiyun 		return -EINVAL;
2013*4882a593Smuzhiyun 	if (end >= isize) {
2014*4882a593Smuzhiyun 		if (mode & FALLOC_FL_KEEP_SIZE) {
2015*4882a593Smuzhiyun 			len = isize - start;
2016*4882a593Smuzhiyun 			end = start + len - 1;
2017*4882a593Smuzhiyun 		} else
2018*4882a593Smuzhiyun 			return -EINVAL;
2019*4882a593Smuzhiyun 	}
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	/*
2022*4882a593Smuzhiyun 	 * Don't allow IO that isn't aligned to logical block size.
2023*4882a593Smuzhiyun 	 */
2024*4882a593Smuzhiyun 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
2025*4882a593Smuzhiyun 		return -EINVAL;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	/* Invalidate the page cache, including dirty pages. */
2028*4882a593Smuzhiyun 	error = truncate_bdev_range(bdev, file->f_mode, start, end);
2029*4882a593Smuzhiyun 	if (error)
2030*4882a593Smuzhiyun 		return error;
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun 	switch (mode) {
2033*4882a593Smuzhiyun 	case FALLOC_FL_ZERO_RANGE:
2034*4882a593Smuzhiyun 	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
2035*4882a593Smuzhiyun 		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
2036*4882a593Smuzhiyun 					    GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
2037*4882a593Smuzhiyun 		break;
2038*4882a593Smuzhiyun 	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
2039*4882a593Smuzhiyun 		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
2040*4882a593Smuzhiyun 					     GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
2041*4882a593Smuzhiyun 		break;
2042*4882a593Smuzhiyun 	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
2043*4882a593Smuzhiyun 		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
2044*4882a593Smuzhiyun 					     GFP_KERNEL, 0);
2045*4882a593Smuzhiyun 		break;
2046*4882a593Smuzhiyun 	default:
2047*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2048*4882a593Smuzhiyun 	}
2049*4882a593Smuzhiyun 	if (error)
2050*4882a593Smuzhiyun 		return error;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	/*
2053*4882a593Smuzhiyun 	 * Invalidate again; if someone wandered in and dirtied a page,
2054*4882a593Smuzhiyun 	 * the caller will be given -EBUSY.  The third argument is
2055*4882a593Smuzhiyun 	 * inclusive, so the rounding here is safe.
2056*4882a593Smuzhiyun 	 */
2057*4882a593Smuzhiyun 	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
2058*4882a593Smuzhiyun 					     start >> PAGE_SHIFT,
2059*4882a593Smuzhiyun 					     end >> PAGE_SHIFT);
2060*4882a593Smuzhiyun }
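
/*
 * From user space the switch above is reached through fallocate(2) on
 * the device node.  For example, zeroing the first MiB of a disk without
 * discarding it (illustrative; offsets must be logical-block aligned as
 * checked above, and the descriptor must be open for writing):
 *
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 20);
 */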
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun const struct file_operations def_blk_fops = {
2063*4882a593Smuzhiyun 	.open		= blkdev_open,
2064*4882a593Smuzhiyun 	.release	= blkdev_close,
2065*4882a593Smuzhiyun 	.llseek		= block_llseek,
2066*4882a593Smuzhiyun 	.read_iter	= blkdev_read_iter,
2067*4882a593Smuzhiyun 	.write_iter	= blkdev_write_iter,
2068*4882a593Smuzhiyun 	.iopoll		= blkdev_iopoll,
2069*4882a593Smuzhiyun 	.mmap		= generic_file_mmap,
2070*4882a593Smuzhiyun 	.fsync		= blkdev_fsync,
2071*4882a593Smuzhiyun 	.unlocked_ioctl	= block_ioctl,
2072*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
2073*4882a593Smuzhiyun 	.compat_ioctl	= compat_blkdev_ioctl,
2074*4882a593Smuzhiyun #endif
2075*4882a593Smuzhiyun 	.splice_read	= generic_file_splice_read,
2076*4882a593Smuzhiyun 	.splice_write	= iter_file_splice_write,
2077*4882a593Smuzhiyun 	.fallocate	= blkdev_fallocate,
2078*4882a593Smuzhiyun };
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun /**
2081*4882a593Smuzhiyun  * lookup_bdev  - lookup a struct block_device by name
2082*4882a593Smuzhiyun  * @pathname:	special file representing the block device
2083*4882a593Smuzhiyun  *
2084*4882a593Smuzhiyun  * Get a reference to the blockdevice at @pathname in the current
2085*4882a593Smuzhiyun  * namespace if possible and return it.  Return ERR_PTR(error)
2086*4882a593Smuzhiyun  * otherwise.
2087*4882a593Smuzhiyun  */
lookup_bdev(const char * pathname)2088*4882a593Smuzhiyun struct block_device *lookup_bdev(const char *pathname)
2089*4882a593Smuzhiyun {
2090*4882a593Smuzhiyun 	struct block_device *bdev;
2091*4882a593Smuzhiyun 	struct inode *inode;
2092*4882a593Smuzhiyun 	struct path path;
2093*4882a593Smuzhiyun 	int error;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	if (!pathname || !*pathname)
2096*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
2099*4882a593Smuzhiyun 	if (error)
2100*4882a593Smuzhiyun 		return ERR_PTR(error);
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 	inode = d_backing_inode(path.dentry);
2103*4882a593Smuzhiyun 	error = -ENOTBLK;
2104*4882a593Smuzhiyun 	if (!S_ISBLK(inode->i_mode))
2105*4882a593Smuzhiyun 		goto fail;
2106*4882a593Smuzhiyun 	error = -EACCES;
2107*4882a593Smuzhiyun 	if (!may_open_dev(&path))
2108*4882a593Smuzhiyun 		goto fail;
2109*4882a593Smuzhiyun 	error = -ENOMEM;
2110*4882a593Smuzhiyun 	bdev = bd_acquire(inode);
2111*4882a593Smuzhiyun 	if (!bdev)
2112*4882a593Smuzhiyun 		goto fail;
2113*4882a593Smuzhiyun out:
2114*4882a593Smuzhiyun 	path_put(&path);
2115*4882a593Smuzhiyun 	return bdev;
2116*4882a593Smuzhiyun fail:
2117*4882a593Smuzhiyun 	bdev = ERR_PTR(error);
2118*4882a593Smuzhiyun 	goto out;
2119*4882a593Smuzhiyun }
2120*4882a593Smuzhiyun EXPORT_SYMBOL(lookup_bdev);
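
/*
 * Callers that only need a reference -- not an opened device -- use this
 * directly; blkdev_get_by_path() above is the usual wrapper that opens
 * it as well.  Illustrative (the path is hypothetical):
 *
 *	bdev = lookup_bdev("/dev/mapper/root");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	dev = bdev->bd_dev;	// e.g. just resolve path -> dev_t
 *	bdput(bdev);
 */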
2121*4882a593Smuzhiyun 
__invalidate_device(struct block_device * bdev,bool kill_dirty)2122*4882a593Smuzhiyun int __invalidate_device(struct block_device *bdev, bool kill_dirty)
2123*4882a593Smuzhiyun {
2124*4882a593Smuzhiyun 	struct super_block *sb = get_super(bdev);
2125*4882a593Smuzhiyun 	int res = 0;
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun 	if (sb) {
2128*4882a593Smuzhiyun 		/*
2129*4882a593Smuzhiyun 		 * no need to lock the super, get_super holds the
2130*4882a593Smuzhiyun 		 * read mutex so the filesystem cannot go away
2131*4882a593Smuzhiyun 		 * under us (->put_super runs with the write lock
2132*4882a593Smuzhiyun 		 * held).
2133*4882a593Smuzhiyun 		 */
2134*4882a593Smuzhiyun 		shrink_dcache_sb(sb);
2135*4882a593Smuzhiyun 		res = invalidate_inodes(sb, kill_dirty);
2136*4882a593Smuzhiyun 		drop_super(sb);
2137*4882a593Smuzhiyun 	}
2138*4882a593Smuzhiyun 	invalidate_bdev(bdev);
2139*4882a593Smuzhiyun 	return res;
2140*4882a593Smuzhiyun }
2141*4882a593Smuzhiyun EXPORT_SYMBOL(__invalidate_device);
2142*4882a593Smuzhiyun 
iterate_bdevs(void (* func)(struct block_device *,void *),void * arg)2143*4882a593Smuzhiyun void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
2144*4882a593Smuzhiyun {
2145*4882a593Smuzhiyun 	struct inode *inode, *old_inode = NULL;
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	spin_lock(&blockdev_superblock->s_inode_list_lock);
2148*4882a593Smuzhiyun 	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
2149*4882a593Smuzhiyun 		struct address_space *mapping = inode->i_mapping;
2150*4882a593Smuzhiyun 		struct block_device *bdev;
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 		spin_lock(&inode->i_lock);
2153*4882a593Smuzhiyun 		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
2154*4882a593Smuzhiyun 		    mapping->nrpages == 0) {
2155*4882a593Smuzhiyun 			spin_unlock(&inode->i_lock);
2156*4882a593Smuzhiyun 			continue;
2157*4882a593Smuzhiyun 		}
2158*4882a593Smuzhiyun 		__iget(inode);
2159*4882a593Smuzhiyun 		spin_unlock(&inode->i_lock);
2160*4882a593Smuzhiyun 		spin_unlock(&blockdev_superblock->s_inode_list_lock);
2161*4882a593Smuzhiyun 		/*
2162*4882a593Smuzhiyun 		 * We hold a reference to 'inode' so it couldn't have been
2163*4882a593Smuzhiyun 		 * removed from the s_inodes list while we dropped the
2164*4882a593Smuzhiyun 		 * s_inode_list_lock.  We cannot iput the inode now as we can
2165*4882a593Smuzhiyun 		 * be holding the last reference and we cannot iput it under
2166*4882a593Smuzhiyun 		 * s_inode_list_lock. So we keep the reference and iput it
2167*4882a593Smuzhiyun 		 * later.
2168*4882a593Smuzhiyun 		 */
2169*4882a593Smuzhiyun 		iput(old_inode);
2170*4882a593Smuzhiyun 		old_inode = inode;
2171*4882a593Smuzhiyun 		bdev = I_BDEV(inode);
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 		mutex_lock(&bdev->bd_mutex);
2174*4882a593Smuzhiyun 		if (bdev->bd_openers)
2175*4882a593Smuzhiyun 			func(bdev, arg);
2176*4882a593Smuzhiyun 		mutex_unlock(&bdev->bd_mutex);
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 		spin_lock(&blockdev_superblock->s_inode_list_lock);
2179*4882a593Smuzhiyun 	}
2180*4882a593Smuzhiyun 	spin_unlock(&blockdev_superblock->s_inode_list_lock);
2181*4882a593Smuzhiyun 	iput(old_inode);
2182*4882a593Smuzhiyun }
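
/*
 * Hedged example of the callback shape iterate_bdevs() expects; the sync
 * path uses this scheme to write back every open block device
 * ("flush_one" is a hypothetical name):
 *
 *	static void flush_one(struct block_device *bdev, void *arg)
 *	{
 *		filemap_fdatawrite(bdev->bd_inode->i_mapping);
 *	}
 *
 *	iterate_bdevs(flush_one, NULL);
 */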
2183