Lines Matching +full:foo +full:- +full:queue
(fragments of the Linux kernel's fs/block_dev.c, circa the v5.11 development cycle; matches are non-contiguous lines, grouped below by the function or comment they occur in)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/backing-dev.h>

/* in I_BDEV() */
	return &BDEV_I(inode)->bdev;

/* in bdev_write_inode() */
	struct inode *inode = bdev->bd_inode;
	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		spin_lock(&inode->i_lock);
	spin_unlock(&inode->i_lock);

/* in kill_bdev() */
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)

/* in invalidate_bdev() */
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	if (mapping->nrpages) {
		invalidate_mapping_pages(mapping, 0, -1);

/* in truncate_bdev_range() */
	claimed_bdev = bdev->bd_contains;
	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,

/* in set_init_blocksize() */
	loff_t size = i_size_read(bdev->bd_inode);
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
/* in set_blocksize() */
		return -EINVAL;
		return -EINVAL;
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		bdev->bd_inode->i_blkbits = blksize_bits(size);

/* in sb_set_blocksize() */
	if (set_blocksize(sb->s_bdev, size))
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;

/* in sb_min_blocksize() */
	int minsize = bdev_logical_block_size(sb->s_bdev);
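/*
 * Hedged usage sketch, not part of this file: how a filesystem's
 * fill_super typically drives the two exported helpers above. "myfs" and
 * the 4096-byte native size are hypothetical; both helpers return the
 * resulting block size, or 0 on failure.
 */
static int myfs_pick_blocksize(struct super_block *sb)
{
	/* start with the smallest size the device can address */
	if (!sb_min_blocksize(sb, 512))
		return -EINVAL;		/* logical block size too large */

	/* ...read the on-disk superblock, learn the native block size... */

	/* then switch sb->s_blocksize to the native size */
	if (!sb_set_blocksize(sb, 4096))
		return -EINVAL;		/* rejected by set_blocksize() */
	return 0;
}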
/* in blkdev_get_block() */
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;

/* in bdev_file_inode() */
	return file->f_mapping->host;

/* in dio_bio_write_op() */
	if (iocb->ki_flags & IOCB_DSYNC)

/* in blkdev_bio_end_io_simple() */
	struct task_struct *waiter = bio->bi_private;
	WRITE_ONCE(bio->bi_private, NULL);

/* in __blkdev_direct_IO_simple() */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;
		return -ENOMEM;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_NOWAIT)
	if (iocb->ki_flags & IOCB_HIPRI)
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
/* in blkdev_iopoll() */
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);

/* in blkdev_bio_end_io() */
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;
	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;
	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
				ret = blk_status_to_errno(dio->bio.bi_status);
			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
			struct task_struct *waiter = dio->waiter;
			WRITE_ONCE(dio->waiter, NULL);
/* in __blkdev_direct_IO() */
	struct file *file = iocb->ki_filp;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	loff_t pos = iocb->ki_pos;
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		dio->iocb = iocb;
	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;
			bio->bi_status = BLK_STS_IOERR;
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;
			if (iocb->ki_flags & IOCB_HIPRI) {
				WRITE_ONCE(iocb->ki_cookie, qc);
		if (!dio->multi_bio) {
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
			atomic_inc(&dio->ref);
		return -EIOCBQUEUED;
		if (!READ_ONCE(dio->waiter))
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		ret = blk_status_to_errno(dio->bio.bi_status);
		ret = dio->size;
	bio_put(&dio->bio);
/* in __sync_blockdev() */
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);

/*
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
		bdev->bd_fsfreeze_count--;
	bdev->bd_fsfreeze_sb = sb;
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

/*
 * thaw_bdev -- unlock filesystem
 */
	int error = -EINVAL;
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
	if (--bdev->bd_fsfreeze_count > 0)
	sb = bdev->bd_fsfreeze_sb;
	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
		bdev->bd_fsfreeze_count++;
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
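/*
 * Hedged usage sketch, not part of this file: the freeze/thaw pairing as a
 * snapshot-taking caller would use it, assuming this tree's signatures,
 * where freeze_bdev() stashes the super_block in bd_fsfreeze_sb and
 * thaw_bdev() takes only the bdev.
 */
static int snapshot_with_freeze(struct block_device *bdev)
{
	int error = freeze_bdev(bdev);	/* nestable: bd_fsfreeze_count++ */

	if (error)
		return error;

	/* ...filesystem is quiesced; take the snapshot here... */

	return thaw_bdev(bdev);		/* -EINVAL if never frozen */
}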
/* for a block special file file_inode(file)->i_size is zero */

/* in blkdev_fsync() */
	if (error == -EOPNOTSUPP)

/*
 * bdev_read_page() - Start reading a page from a block device
 * queue full; callers should try a different route to read this page rather
 */
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;
	if (!ops->rw_page || bdev_get_integrity(bdev))
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
	blk_queue_exit(bdev->bd_disk->queue);

/*
 * bdev_write_page() - Start writing a page to a block device
 * queue the page to the device), the page will still be locked. If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 * queue full; callers should try a different route to write this page rather
 */
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
	blk_queue_exit(bdev->bd_disk->queue);
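/*
 * Hedged caller sketch, not part of this file: bdev_read_page() returns
 * -EOPNOTSUPP when the driver has no ->rw_page, and callers (e.g. the
 * mpage and swap paths) fall back to an ordinary bio. Assumes
 * <linux/bio.h>; "read_one_page" is a hypothetical helper.
 */
static int read_one_page(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct bio bio;
	struct bio_vec bvec;
	int ret = bdev_read_page(bdev, sector, page);

	if (ret != -EOPNOTSUPP)
		return ret;		/* ->rw_page handled it (or failed) */

	/* fall back to a plain synchronous bio read */
	bio_init(&bio, &bvec, 1);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	return submit_bio_wait(&bio);
}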
/* pseudo-fs */

/* in bdev_alloc_inode() */
	return &ei->vfs_inode;

/* in init_once() */
static void init_once(void *foo)
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;
	mutex_init(&bdev->bd_mutex);
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
	bdev->bd_bdi = &noop_backing_dev_info;
	inode_init_once(&ei->vfs_inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);

/* in bdev_evict_inode() */
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	/* Detach inode from wb early as bdi_put() may free bdi->wb */
	if (bdev->bd_bdi != &noop_backing_dev_info) {
		bdi_put(bdev->bd_bdi);
		bdev->bd_bdi = &noop_backing_dev_info;

/* in bd_init_fs_context() */
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;

/* in bdev_cache_init() */
		panic("Cannot register bdev pseudo-fs");
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
/* Most likely _very_ bad one - but then it's hardly critical for small */

/* in bdev_test() */
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;

/* in bdev_set() */
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;

/* in bdget() */
	bdev = &BDEV_I(inode)->bdev;
	if (inode->i_state & I_NEW) {
		spin_lock_init(&bdev->bd_size_lock);
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_part_count = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
/*
 * bdgrab -- Grab a reference to an already referenced block device
 */
	ihold(bdev->bd_inode);

/* in nr_blockdev_pages() */
	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

/* in bdput() */
	iput(bdev->bd_inode);

/* in bd_acquire() */
	bdev = inode->i_bdev;
	if (bdev && !inode_unhashed(bdev->bd_inode)) {
	bdev = bdget(inode->i_rdev);
	if (!inode->i_bdev) {
		/* So, we can access it via ->i_mapping always */
		inode->i_bdev = bdev;
		inode->i_mapping = bdev->bd_inode->i_mapping;

/* in bd_forget() */
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
/*
 * bd_may_claim - test whether a block device can be claimed
 */
	if (bdev->bd_holder == holder)
	else if (bdev->bd_holder != NULL)
	else if (whole->bd_holder == bd_may_claim)
	else if (whole->bd_holder != NULL)
	return true;	 /* is a partition of an un-held device */

/*
 * bd_prepare_to_claim - claim a block device
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
		return -EBUSY;
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
	whole->bd_claiming = holder;

/* in bdev_get_gendisk() */
	struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
	if (inode_unhashed(bdev->bd_inode)) {

/* in bd_clear_claiming() */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);

/*
 * bd_finish_claiming - finish claiming of a block device
 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;

/*
 * bd_abort_claiming - abort claiming of a block device
 */
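/*
 * Hedged sketch, not part of this file: the prepare/finish/abort handshake
 * the kernel-doc fragments above describe, in the shape __blkdev_get()
 * uses it. "do_open_dev" stands in for the real open step and is
 * hypothetical.
 */
static int claim_and_open(struct block_device *bdev,
			  struct block_device *whole, void *holder)
{
	int ret = bd_prepare_to_claim(bdev, whole, holder);	/* 0 or -EBUSY */

	if (ret)
		return ret;

	ret = do_open_dev(bdev);			/* hypothetical open */
	if (ret)
		bd_abort_claiming(bdev, whole, holder);	/* wakes waiters */
	else
		bd_finish_claiming(bdev, whole, holder); /* sets bd_holder */
	return ret;
}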
/* in bd_find_holder_disk() */
	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)

/*
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * 0 on success, -errno on failure.
 */
	mutex_lock(&bdev->bd_mutex);
	WARN_ON_ONCE(!bdev->bd_holder);
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		holder->refcnt++;
		ret = -ENOMEM;
	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;
	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	kobject_get(bdev->bd_part->holder_dir);
	list_add(&holder->list, &bdev->bd_holder_disks);
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	mutex_unlock(&bdev->bd_mutex);

/*
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 */
	mutex_lock(&bdev->bd_mutex);
	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
	mutex_unlock(&bdev->bd_mutex);
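/*
 * Hedged usage sketch, not part of this file: how a stacking driver such
 * as dm pairs the two calls above when it starts and stops using a slave
 * device. "md_disk" is a hypothetical gendisk of the holder.
 */
static int attach_slave(struct gendisk *md_disk, struct block_device *slave)
{
	/* creates the holders/ and slaves/ symlinks shown above */
	return bd_link_disk_holder(slave, md_disk);
}

static void detach_slave(struct gendisk *md_disk, struct block_device *slave)
{
	bd_unlink_disk_holder(slave, md_disk);	/* drops the refcounted links */
}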
/*
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 */
	spin_lock(&bdev->bd_size_lock);
	bdev_size = i_size_read(bdev->bd_inode);
			disk->disk_name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
	spin_unlock(&bdev->bd_size_lock);
			disk->disk_name);

/*
 * revalidate_disk_size - checks for disk size change and adjusts bdev size.
 */
	if (disk->flags & GENHD_FL_HIDDEN)

/* in bd_set_nr_sectors() */
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);

/* in bdev_disk_changed() */
	struct gendisk *disk = bdev->bd_disk;
	lockdep_assert_held(&bdev->bd_mutex);
	if (!(disk->flags & GENHD_FL_UP))
		return -ENXIO;
	clear_bit(GD_NEED_PART_SCAN, &disk->state);
	/* udisks polling for legacy ide-cdrom devices. Use the crude check */
	    !(disk->flags & GENHD_FL_REMOVABLE))
	if (disk->fops->revalidate_disk)
		disk->fops->revalidate_disk(disk);
	if (ret == -EAGAIN)
	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
/*
 * mutex_lock(part->bd_mutex)
 *   mutex_lock_nested(whole->bd_mutex, 1)
 */

/* in __blkdev_get() */
	ret = -ENXIO;
	ret = -ENOMEM;
	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_contains = bdev;
		bdev->bd_partno = partno;
			ret = -ENXIO;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!bdev->bd_part)
			if (disk->fops->open) {
				ret = disk->fops->open(bdev, mode);
				if (ret == -ERESTARTSYS)
			/* if open succeeded or failed with -ENOMEDIUM. */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
			bdev->bd_contains = bdgrab(whole);
			bdev->bd_part = disk_get_part(disk, partno);
			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
				ret = -ENXIO;
			bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects);
		if (bdev->bd_bdi == &noop_backing_dev_info)
			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
		if (bdev->bd_contains == bdev) {
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
	bdev->bd_openers++;
		bdev->bd_part_count++;
	if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
	    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
		bdev->bd_write_holder = true;
	mutex_unlock(&bdev->bd_mutex);
	disk_put_part(bdev->bd_part);
	bdev->bd_disk = NULL;
	bdev->bd_part = NULL;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, mode, 1);
	bdev->bd_contains = NULL;
	mutex_unlock(&bdev->bd_mutex);
/*
 * blkdev_get - open a block device
 * 0 on success, -errno on failure.
 */
	ret = devcgroup_inode_permission(bdev->bd_inode, perm);

/*
 * blkdev_get_by_path - open a block device by name
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
		return ERR_PTR(-EACCES);

/*
 * blkdev_get_by_dev - open a block device by device number
 *
 * Use it ONLY if you really do not have anything better - i.e. when
 * ever need it - reconsider your API.
 *
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
		return ERR_PTR(-ENOMEM);
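/*
 * Hedged usage sketch, not part of this file: an exclusive open by path as
 * mount-time callers do it. The path and holder cookie are illustrative;
 * with FMODE_EXCL, the same mode must later be passed to blkdev_put().
 */
static struct block_device *open_backing_dev(const char *path, void *holder)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev = blkdev_get_by_path(path, mode, holder);

	if (IS_ERR(bdev))
		return bdev;	/* e.g. ERR_PTR(-EACCES) for a read-only dev */

	/* ...use the device, then release with blkdev_put(bdev, mode)... */
	return bdev;
}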
/* in blkdev_open() */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;
		return -ENOMEM;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return blkdev_get(bdev, filp->f_mode, filp);
/* in __blkdev_put() */
	struct gendisk *disk = bdev->bd_disk;
	if (bdev->bd_openers == 1)
	mutex_lock_nested(&bdev->bd_mutex, for_part);
		bdev->bd_part_count--;
	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			disk->fops->release(disk, mode);
	if (!bdev->bd_openers) {
		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;
	mutex_unlock(&bdev->bd_mutex);

/* in blkdev_put() */
	mutex_lock(&bdev->bd_mutex);
		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!bdev->bd_contains->bd_holders)
			bdev->bd_contains->bd_holder = NULL;
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(bdev->bd_disk);
			bdev->bd_write_holder = false;
	/* from userland - e.g. eject(1). */
	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
	mutex_unlock(&bdev->bd_mutex);
/* in blkdev_close() */
	blkdev_put(bdev, filp->f_mode);

/* in block_ioctl() */
	fmode_t mode = file->f_mode;
	if (file->f_flags & O_NDELAY)

/* in blkdev_write_iter() */
	struct file *file = iocb->ki_filp;
		return -EPERM;
	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;
	if (iocb->ki_pos >= size)
		return -ENOSPC;
	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;
	size -= iocb->ki_pos;
		shorted = iov_iter_count(from) - size;

/* in blkdev_read_iter() */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
		size -= pos;
			shorted = iov_iter_count(to) - size;

/* in blkdev_releasepage() */
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);
/* in blkdev_fallocate() */
	loff_t end = start + len - 1;
		return -EOPNOTSUPP;
	isize = i_size_read(bdev->bd_inode);
		return -EINVAL;
		len = isize - start;
		end = start + len - 1;
		return -EINVAL;
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
		return -EOPNOTSUPP;
	/* the caller will be given -EBUSY. The third argument is */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
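/*
 * Hedged userspace-side sketch, not part of this file: exercising
 * blkdev_fallocate() through the fallocate(2) syscall on a block device
 * node. The device path is illustrative; start/len must be multiples of
 * the logical block size or the alignment check above returns -EINVAL.
 */
#define _GNU_SOURCE		/* for fallocate() */
#include <fcntl.h>
#include <linux/falloc.h>	/* FALLOC_FL_* flags */
#include <stdio.h>
#include <unistd.h>

int zero_first_mib(const char *dev)	/* e.g. "/dev/sdX", hypothetical */
{
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	/* zero 1 MiB at offset 0 without changing the device size */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1 << 20) < 0)
		perror("fallocate");	/* EINVAL on misaligned start/len */
	close(fd);
	return 0;
}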
/*
 * lookup_bdev - lookup a struct block_device by name
 */
		return ERR_PTR(-EINVAL);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
	error = -EACCES;
	error = -ENOMEM;
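/*
 * Hedged usage sketch, not part of this file: lookup_bdev() in this tree
 * resolves a device-node path to its block_device without opening it; the
 * reference must be dropped with bdput(). "devt_of" is hypothetical.
 */
static dev_t devt_of(const char *path)
{
	struct block_device *bdev = lookup_bdev(path);
	dev_t devt;

	if (IS_ERR(bdev))
		return 0;	/* e.g. -ENOTBLK for a non-block node */
	devt = bdev->bd_dev;
	bdput(bdev);
	return devt;
}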
/* in __invalidate_device() */
	/* under us (->put_super runs with the write lock */

/* in iterate_bdevs() */
	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		mutex_lock(&bdev->bd_mutex);
		if (bdev->bd_openers)
		mutex_unlock(&bdev->bd_mutex);
		spin_lock(&blockdev_superblock->s_inode_list_lock);
	spin_unlock(&blockdev_superblock->s_inode_list_lock);