Lines matching references to md (the struct mapped_device instance used throughout this file)

98 	struct mapped_device *md;  member
339 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
341 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
346 struct mapped_device *md; in dm_blk_open() local
350 md = bdev->bd_disk->private_data; in dm_blk_open()
351 if (!md) in dm_blk_open()
354 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
355 dm_deleting_md(md)) { in dm_blk_open()
356 md = NULL; in dm_blk_open()
360 dm_get(md); in dm_blk_open()
361 atomic_inc(&md->open_count); in dm_blk_open()
365 return md ? 0 : -ENXIO; in dm_blk_open()
370 struct mapped_device *md; in dm_blk_close() local
374 md = disk->private_data; in dm_blk_close()
375 if (WARN_ON(!md)) in dm_blk_close()
378 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
379 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
382 dm_put(md); in dm_blk_close()
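
dm_blk_open() and dm_blk_close() pair a holder reference (dm_get/dm_put) with an open count, and the open path refuses devices already being freed or deleted. A minimal sketch of that pairing, with the locking simplified; schedule_deferred_remove() is a hypothetical stand-in for the deferred-removal kick:

	/* Sketch only: guarded open/close reference pairing, locking simplified. */
	static int sketch_open(struct mapped_device *md)
	{
		if (!md || test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md))
			return -ENXIO;

		dm_get(md);                  /* holder reference */
		atomic_inc(&md->open_count); /* opener count */
		return 0;
	}

	static void sketch_close(struct mapped_device *md)
	{
		/* The last opener triggers any removal deferred while the device was open. */
		if (atomic_dec_and_test(&md->open_count) &&
		    test_bit(DMF_DEFERRED_REMOVE, &md->flags))
			schedule_deferred_remove(md); /* hypothetical helper */
		dm_put(md);
	}
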
387 int dm_open_count(struct mapped_device *md) in dm_open_count() argument
389 return atomic_read(&md->open_count); in dm_open_count()
395 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion() argument
401 if (dm_open_count(md)) { in dm_lock_for_deletion()
404 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
405 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
408 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
415 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove() argument
421 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
424 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
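
dm_lock_for_deletion() and dm_cancel_deferred_remove() form a small state machine over DMF_DEFERRED_REMOVE and DMF_DELETING. Reassembling the visible fragments: an open device can at most be flagged for deferred removal; a closed device is refused when only_deferred is set but the flag is absent; otherwise DMF_DELETING is set. A hedged reconstruction (return codes and locking are assumptions, since those lines are not shown):

	/* Sketch of the visible branch structure; error codes assumed, lock omitted. */
	int sketch_lock_for_deletion(struct mapped_device *md,
				     bool mark_deferred, bool only_deferred)
	{
		int r = 0;

		if (dm_open_count(md)) {
			r = -EBUSY;                       /* assumed: busy while open */
			if (mark_deferred)
				set_bit(DMF_DEFERRED_REMOVE, &md->flags);
		} else if (only_deferred &&
			   !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) {
			r = -EEXIST;                      /* assumed: nothing to defer */
		} else {
			set_bit(DMF_DELETING, &md->flags);
		}
		return r;
	}
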
438 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo() local
440 return dm_get_geometry(md, geo); in dm_blk_getgeo()
477 struct mapped_device *md = disk->private_data; in dm_blk_report_zones() local
486 if (dm_suspended_md(md)) in dm_blk_report_zones()
489 map = dm_get_live_table(md, &srcu_idx); in dm_blk_report_zones()
514 dm_put_live_table(md, srcu_idx); in dm_blk_report_zones()
521 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl() argument
530 map = dm_get_live_table(md, srcu_idx); in dm_prepare_ioctl()
542 if (dm_suspended_md(md)) in dm_prepare_ioctl()
547 dm_put_live_table(md, *srcu_idx); in dm_prepare_ioctl()
555 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl() argument
557 dm_put_live_table(md, srcu_idx); in dm_unprepare_ioctl()
563 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
566 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_blk_ioctl()
586 dm_unprepare_ioctl(md, srcu_idx); in dm_blk_ioctl()
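
dm_blk_ioctl() shows the bracket used by every ioctl-style entry point in this file, including the persistent-reservation handlers near the end of the listing: dm_prepare_ioctl() pins the live table under SRCU (failing if the device is suspended), the work is forwarded, and dm_unprepare_ioctl() drops the SRCU index. A sketch of the convention; forward_ioctl() is a hypothetical stand-in for the actual forwarding call:

	/* Sketch: the prepare/unprepare SRCU bracket around ioctl-style work. */
	static int sketch_ioctl(struct block_device *bdev, unsigned int cmd,
				unsigned long arg)
	{
		struct mapped_device *md = bdev->bd_disk->private_data;
		int r, srcu_idx;

		r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
		if (r < 0)
			goto out;           /* suspended or no usable table */

		r = forward_ioctl(bdev, cmd, arg);   /* hypothetical forwarding */
	out:
		dm_unprepare_ioctl(md, srcu_idx);
		return r;
	}
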
601 struct mapped_device *md = io->md; in start_io_acct() local
605 if (unlikely(dm_stats_used(&md->stats))) in start_io_acct()
606 dm_stats_account_io(&md->stats, bio_data_dir(bio), in start_io_acct()
611 static void end_io_acct(struct mapped_device *md, struct bio *bio, in end_io_acct() argument
616 if (unlikely(dm_stats_used(&md->stats))) in end_io_acct()
617 dm_stats_account_io(&md->stats, bio_data_dir(bio), in end_io_acct()
626 if (unlikely(wq_has_sleeper(&md->wait))) in end_io_acct()
627 wake_up(&md->wait); in end_io_acct()
630 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io() argument
636 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); in alloc_io()
649 io->md = md; in alloc_io()
657 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io() argument
671 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio()
697 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
701 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
702 bio_list_add(&md->deferred, bio); in queue_io()
703 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
704 queue_work(md->wq, &md->work); in queue_io()
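
queue_io() is the producer half of a deferred-work pattern: park the bio on a spinlock-protected bio_list, then kick md->wq. The consumer half is dm_wq_work() further down, which pops entries until the list empties or DMF_BLOCK_IO_FOR_SUSPEND blocks it again. Both halves together, as a sketch (the resubmission call is an assumption):

	/* Sketch: producer/consumer pair around the md->deferred bio list. */
	static void sketch_queue_io(struct mapped_device *md, struct bio *bio)
	{
		unsigned long flags;

		spin_lock_irqsave(&md->deferred_lock, flags);
		bio_list_add(&md->deferred, bio);      /* producer: park the bio */
		spin_unlock_irqrestore(&md->deferred_lock, flags);
		queue_work(md->wq, &md->work);         /* wake the drainer */
	}

	static void sketch_drain(struct mapped_device *md)
	{
		struct bio *bio;

		while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
			spin_lock_irq(&md->deferred_lock);
			bio = bio_list_pop(&md->deferred); /* consumer: unpark */
			spin_unlock_irq(&md->deferred_lock);
			if (!bio)
				break;
			submit_bio_noacct(bio);            /* assumed resubmit path */
		}
	}
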
712 	struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table() argument
714 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
716 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
719 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table() argument
721 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
724 void dm_sync_table(struct mapped_device *md) in dm_sync_table() argument
726 synchronize_srcu(&md->io_barrier); in dm_sync_table()
734 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast() argument
737 return rcu_dereference(md->map); in dm_get_live_table_fast()
740 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast() argument
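
These accessors are the core read-side protocol for the live table: srcu_read_lock() plus srcu_dereference() on the way in, srcu_read_unlock() on the way out, with dm_sync_table() (synchronize_srcu) letting a writer wait out all readers before freeing an old map; the _fast variants use plain RCU for non-sleeping callers. The canonical reader, as a sketch (do_something_with() is a placeholder):

	/* Sketch: canonical SRCU-protected access to the live table. */
	static int sketch_reader(struct mapped_device *md)
	{
		struct dm_table *map;
		int srcu_idx, r = -EIO;    /* assumed default error */

		map = dm_get_live_table(md, &srcu_idx);
		if (map)
			r = do_something_with(map);   /* placeholder work */
		/* A concurrent table swap cannot free map until this returns the idx. */
		dm_put_live_table(md, srcu_idx);
		return r;
	}
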
751 struct mapped_device *md) in open_table_device() argument
763 r = bd_link_disk_holder(bdev, dm_disk(md)); in open_table_device()
777 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device() argument
782 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); in close_table_device()
801 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device() argument
807 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
808 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
810 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in dm_get_table_device()
812 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
819 if ((r = open_table_device(td, dev, md))) { in dm_get_table_device()
820 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
828 list_add(&td->list, &md->table_devices); in dm_get_table_device()
832 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
839 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device() argument
843 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
845 close_table_device(td, md); in dm_put_table_device()
849 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
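
dm_get_table_device()/dm_put_table_device() are find-or-create with per-entry reference counting, all under md->table_devices_lock: look up the (dev, mode) pair, allocate on the md's NUMA node and open it if absent, then take a reference; the put side is expected to drop the reference and only close/free on the last put. A condensed sketch (the refcount field name and its exact handling are assumptions):

	/* Sketch: mutex-protected find-or-create with a per-entry refcount. */
	int sketch_get_table_device(struct mapped_device *md, dev_t dev,
				    fmode_t mode, struct dm_dev **result)
	{
		struct table_device *td;
		int r;

		mutex_lock(&md->table_devices_lock);
		td = find_table_device(&md->table_devices, dev, mode);
		if (!td) {
			td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
			if (!td) {
				mutex_unlock(&md->table_devices_lock);
				return -ENOMEM;
			}
			r = open_table_device(td, dev, md);
			if (r) {
				mutex_unlock(&md->table_devices_lock);
				kfree(td);
				return r;
			}
			refcount_set(&td->count, 0);   /* field name assumed */
			list_add(&td->list, &md->table_devices);
		}
		refcount_inc(&td->count);
		mutex_unlock(&md->table_devices_lock);

		*result = &td->dm_dev;
		return 0;
	}
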
869 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry() argument
871 *geo = md->geometry; in dm_get_geometry()
879 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry() argument
888 md->geometry = *geo; in dm_set_geometry()
893 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending() argument
895 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
907 struct mapped_device *md = io->md; in dec_pending() local
914 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) in dec_pending()
924 spin_lock_irqsave(&md->deferred_lock, flags); in dec_pending()
925 if (__noflush_suspending(md)) in dec_pending()
927 bio_list_add_head(&md->deferred, io->orig_bio); in dec_pending()
931 spin_unlock_irqrestore(&md->deferred_lock, flags); in dec_pending()
938 free_io(md, io); in dec_pending()
939 end_io_acct(md, bio, start_time, &stats_aux); in dec_pending()
950 queue_io(md, bio); in dec_pending()
960 void disable_discard(struct mapped_device *md) in disable_discard() argument
962 struct queue_limits *limits = dm_get_queue_limits(md); in disable_discard()
966 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); in disable_discard()
969 void disable_write_same(struct mapped_device *md) in disable_write_same() argument
971 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_same()
977 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes() argument
979 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_zeroes()
995 struct mapped_device *md = tio->io->md; in clone_endio() local
1002 disable_discard(md); in clone_endio()
1005 disable_write_same(md); in clone_endio()
1008 disable_write_zeroes(md); in clone_endio()
1041 struct mapped_device *md = io->md; in clone_endio() local
1042 up(&md->swap_bios_semaphore); in clone_endio()
1073 max_len = blk_max_size_offset(ti->table->md->queue, in max_io_len()
1097 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target() argument
1099 __acquires(md->io_barrier) in dm_dax_get_live_target()
1104 map = dm_get_live_table(md, srcu_idx); in dm_dax_get_live_target()
1118 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access() local
1124 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_direct_access()
1137 dm_put_live_table(md, srcu_idx); in dm_dax_direct_access()
1145 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_supported() local
1150 map = dm_get_live_table(md, &srcu_idx); in dm_dax_supported()
1157 dm_put_live_table(md, srcu_idx); in dm_dax_supported()
1165 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter() local
1171 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_from_iter()
1181 dm_put_live_table(md, srcu_idx); in dm_dax_copy_from_iter()
1189 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter() local
1195 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_to_iter()
1205 dm_put_live_table(md, srcu_idx); in dm_dax_copy_to_iter()
1213 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_zero_page_range() local
1219 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_zero_page_range()
1232 dm_put_live_table(md, srcu_idx); in dm_dax_zero_page_range()
1282 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) in __set_swap_bios_limit() argument
1284 mutex_lock(&md->swap_bios_lock); in __set_swap_bios_limit()
1285 while (latch < md->swap_bios) { in __set_swap_bios_limit()
1287 down(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1288 md->swap_bios--; in __set_swap_bios_limit()
1290 while (latch > md->swap_bios) { in __set_swap_bios_limit()
1292 up(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1293 md->swap_bios++; in __set_swap_bios_limit()
1295 mutex_unlock(&md->swap_bios_lock); in __set_swap_bios_limit()
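
__set_swap_bios_limit() resizes a live counting semaphore: shrink by consuming permits with down(), grow by releasing extras with up(), serialized by swap_bios_lock so md->swap_bios always matches the semaphore's capacity. The idiom generalizes to any runtime-tunable concurrency throttle:

	/* Sketch: retune a counting semaphore to a new concurrency limit. */
	static void sketch_resize_limit(struct mapped_device *md, int latch)
	{
		mutex_lock(&md->swap_bios_lock);
		while (latch < md->swap_bios) {
			down(&md->swap_bios_semaphore);  /* retire one permit */
			md->swap_bios--;
		}
		while (latch > md->swap_bios) {
			up(&md->swap_bios_semaphore);    /* grant one permit */
			md->swap_bios++;
		}
		mutex_unlock(&md->swap_bios_lock);
	}
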
1318 struct mapped_device *md = io->md; in __map_bio() local
1320 if (unlikely(latch != md->swap_bios)) in __map_bio()
1321 __set_swap_bios_limit(md, latch); in __map_bio()
1322 down(&md->swap_bios_semaphore); in __map_bio()
1337 struct mapped_device *md = io->md; in __map_bio() local
1338 up(&md->swap_bios_semaphore); in __map_bio()
1345 struct mapped_device *md = io->md; in __map_bio() local
1346 up(&md->swap_bios_semaphore); in __map_bio()
1384 dm_device_name(tio->io->md), in clone_bio()
1423 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1432 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1495 bio_set_dev(ci->bio, ci->io->md->bdev); in __send_empty_flush()
1620 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info() argument
1624 ci->io = alloc_io(md, bio); in init_clone_info()
1634 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio() argument
1641 init_clone_info(&ci, md, map, bio); in __split_and_process_bio()
1665 GFP_NOIO, &md->queue->bio_split); in __split_and_process_bio()
1676 __dm_part_stat_sub(&dm_disk(md)->part0, in __split_and_process_bio()
1681 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); in __split_and_process_bio()
1695 struct mapped_device *md = bio->bi_disk->private_data; in dm_submit_bio() local
1700 map = dm_get_live_table(md, &srcu_idx); in dm_submit_bio()
1703 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || in dm_submit_bio()
1710 queue_io(md, bio); in dm_submit_bio()
1721 ret = __split_and_process_bio(md, map, bio); in dm_submit_bio()
1723 dm_put_live_table(md, srcu_idx); in dm_submit_bio()
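
dm_submit_bio() ties the earlier pieces together: pin the live table, and either park the bio via queue_io() when the device is blocked for suspend (or has no table yet) or hand it to __split_and_process_bio(). A skeleton of that control flow; the real function also has error branches (e.g. for REQ_NOWAIT) that are not visible in these lines:

	/* Skeleton of the bio submission path visible above. */
	static blk_qc_t sketch_submit_bio(struct bio *bio)
	{
		struct mapped_device *md = bio->bi_disk->private_data;
		blk_qc_t ret = BLK_QC_T_NONE;
		struct dm_table *map;
		int srcu_idx;

		map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || !map)
			queue_io(md, bio);               /* park until resume */
		else
			ret = __split_and_process_bio(md, map, bio);

		dm_put_live_table(md, srcu_idx);
		return ret;
	}
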
1795 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device() argument
1797 if (md->wq) in cleanup_mapped_device()
1798 destroy_workqueue(md->wq); in cleanup_mapped_device()
1799 bioset_exit(&md->bs); in cleanup_mapped_device()
1800 bioset_exit(&md->io_bs); in cleanup_mapped_device()
1802 if (md->dax_dev) { in cleanup_mapped_device()
1803 kill_dax(md->dax_dev); in cleanup_mapped_device()
1804 put_dax(md->dax_dev); in cleanup_mapped_device()
1805 md->dax_dev = NULL; in cleanup_mapped_device()
1808 if (md->disk) { in cleanup_mapped_device()
1810 md->disk->private_data = NULL; in cleanup_mapped_device()
1812 del_gendisk(md->disk); in cleanup_mapped_device()
1813 put_disk(md->disk); in cleanup_mapped_device()
1816 if (md->queue) { in cleanup_mapped_device()
1817 dm_queue_destroy_keyslot_manager(md->queue); in cleanup_mapped_device()
1818 blk_cleanup_queue(md->queue); in cleanup_mapped_device()
1821 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
1823 if (md->bdev) { in cleanup_mapped_device()
1824 bdput(md->bdev); in cleanup_mapped_device()
1825 md->bdev = NULL; in cleanup_mapped_device()
1828 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
1829 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
1830 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
1831 mutex_destroy(&md->swap_bios_lock); in cleanup_mapped_device()
1833 dm_mq_cleanup_mapped_device(md); in cleanup_mapped_device()
1842 struct mapped_device *md; in alloc_dev() local
1845 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); in alloc_dev()
1846 if (!md) { in alloc_dev()
1862 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
1866 md->numa_node_id = numa_node_id; in alloc_dev()
1867 md->init_tio_pdu = false; in alloc_dev()
1868 md->type = DM_TYPE_NONE; in alloc_dev()
1869 mutex_init(&md->suspend_lock); in alloc_dev()
1870 mutex_init(&md->type_lock); in alloc_dev()
1871 mutex_init(&md->table_devices_lock); in alloc_dev()
1872 spin_lock_init(&md->deferred_lock); in alloc_dev()
1873 atomic_set(&md->holders, 1); in alloc_dev()
1874 atomic_set(&md->open_count, 0); in alloc_dev()
1875 atomic_set(&md->event_nr, 0); in alloc_dev()
1876 atomic_set(&md->uevent_seq, 0); in alloc_dev()
1877 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
1878 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
1879 spin_lock_init(&md->uevent_lock); in alloc_dev()
1886 md->queue = blk_alloc_queue(numa_node_id); in alloc_dev()
1887 if (!md->queue) in alloc_dev()
1890 md->disk = alloc_disk_node(1, md->numa_node_id); in alloc_dev()
1891 if (!md->disk) in alloc_dev()
1894 init_waitqueue_head(&md->wait); in alloc_dev()
1895 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
1896 init_waitqueue_head(&md->eventq); in alloc_dev()
1897 init_completion(&md->kobj_holder.completion); in alloc_dev()
1899 md->swap_bios = get_swap_bios(); in alloc_dev()
1900 sema_init(&md->swap_bios_semaphore, md->swap_bios); in alloc_dev()
1901 mutex_init(&md->swap_bios_lock); in alloc_dev()
1903 md->disk->major = _major; in alloc_dev()
1904 md->disk->first_minor = minor; in alloc_dev()
1905 md->disk->fops = &dm_blk_dops; in alloc_dev()
1906 md->disk->queue = md->queue; in alloc_dev()
1907 md->disk->private_data = md; in alloc_dev()
1908 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
1911 md->dax_dev = alloc_dax(md, md->disk->disk_name, in alloc_dev()
1913 if (IS_ERR(md->dax_dev)) { in alloc_dev()
1914 md->dax_dev = NULL; in alloc_dev()
1919 add_disk_no_queue_reg(md->disk); in alloc_dev()
1920 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
1922 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); in alloc_dev()
1923 if (!md->wq) in alloc_dev()
1926 md->bdev = bdget_disk(md->disk, 0); in alloc_dev()
1927 if (!md->bdev) in alloc_dev()
1930 dm_stats_init(&md->stats); in alloc_dev()
1934 old_md = idr_replace(&_minor_idr, md, minor); in alloc_dev()
1939 return md; in alloc_dev()
1942 cleanup_mapped_device(md); in alloc_dev()
1948 kvfree(md); in alloc_dev()
1952 static void unlock_fs(struct mapped_device *md);
1954 static void free_dev(struct mapped_device *md) in free_dev() argument
1956 int minor = MINOR(disk_devt(md->disk)); in free_dev()
1958 unlock_fs(md); in free_dev()
1960 cleanup_mapped_device(md); in free_dev()
1962 free_table_devices(&md->table_devices); in free_dev()
1963 dm_stats_cleanup(&md->stats); in free_dev()
1967 kvfree(md); in free_dev()
1970 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools() argument
1981 bioset_exit(&md->bs); in __bind_mempools()
1982 bioset_exit(&md->io_bs); in __bind_mempools()
1984 } else if (bioset_initialized(&md->bs)) { in __bind_mempools()
1997 bioset_initialized(&md->bs) || in __bind_mempools()
1998 bioset_initialized(&md->io_bs)); in __bind_mempools()
2000 ret = bioset_init_from_src(&md->bs, &p->bs); in __bind_mempools()
2003 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); in __bind_mempools()
2005 bioset_exit(&md->bs); in __bind_mempools()
2019 struct mapped_device *md = (struct mapped_device *) context; in event_callback() local
2021 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
2022 list_splice_init(&md->uevent_list, &uevents); in event_callback()
2023 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
2025 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
2027 atomic_inc(&md->event_nr); in event_callback()
2028 wake_up(&md->eventq); in event_callback()
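
event_callback() drains a spinlock-protected list by splicing it to a local head first, so delivery happens without the lock held; it then publishes by bumping event_nr and waking eventq, which is exactly what dm_wait_event() (further down) sleeps on. The splice idiom as a sketch:

	/* Sketch: O(1) handoff of a locked list, then lock-free processing. */
	static void sketch_event_callback(struct mapped_device *md)
	{
		unsigned long flags;
		LIST_HEAD(uevents);

		spin_lock_irqsave(&md->uevent_lock, flags);
		list_splice_init(&md->uevent_list, &uevents); /* steal the whole list */
		spin_unlock_irqrestore(&md->uevent_lock, flags);

		dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

		atomic_inc(&md->event_nr);  /* publish the event... */
		wake_up(&md->eventq);       /* ...then wake dm_wait_event() sleepers */
	}
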
2035 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind() argument
2039 struct request_queue *q = md->queue; in __bind()
2044 lockdep_assert_held(&md->suspend_lock); in __bind()
2051 if (size != dm_get_size(md)) in __bind()
2052 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
2054 set_capacity(md->disk, size); in __bind()
2055 bd_set_nr_sectors(md->bdev, size); in __bind()
2057 dm_table_event_callback(t, event_callback, md); in __bind()
2074 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
2077 ret = __bind_mempools(md, t); in __bind()
2083 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2084 rcu_assign_pointer(md->map, (void *)t); in __bind()
2085 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2089 dm_sync_table(md); in __bind()
2098 static struct dm_table *__unbind(struct mapped_device *md) in __unbind() argument
2100 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2106 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2107 dm_sync_table(md); in __unbind()
2118 struct mapped_device *md; in dm_create() local
2120 md = alloc_dev(minor); in dm_create()
2121 if (!md) in dm_create()
2124 r = dm_sysfs_init(md); in dm_create()
2126 free_dev(md); in dm_create()
2130 *result = md; in dm_create()
2138 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type() argument
2140 mutex_lock(&md->type_lock); in dm_lock_md_type()
2143 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type() argument
2145 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2148 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type() argument
2150 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2151 md->type = type; in dm_set_md_type()
2154 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type() argument
2156 return md->type; in dm_get_md_type()
2159 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type() argument
2161 return md->immutable_target_type; in dm_get_immutable_target_type()
2168 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits() argument
2170 BUG_ON(!atomic_read(&md->holders)); in dm_get_queue_limits()
2171 return &md->queue->limits; in dm_get_queue_limits()
2178 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue() argument
2182 enum dm_queue_mode type = dm_get_md_type(md); in dm_setup_md_queue()
2186 md->disk->fops = &dm_rq_blk_dops; in dm_setup_md_queue()
2187 r = dm_mq_init_request_queue(md, t); in dm_setup_md_queue()
2206 dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2207 blk_register_queue(md->disk); in dm_setup_md_queue()
2214 struct mapped_device *md; in dm_get_md() local
2222 md = idr_find(&_minor_idr, minor); in dm_get_md()
2223 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || in dm_get_md()
2224 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2225 md = NULL; in dm_get_md()
2228 dm_get(md); in dm_get_md()
2232 return md; in dm_get_md()
2236 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr() argument
2238 return md->interface_ptr; in dm_get_mdptr()
2241 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr() argument
2243 md->interface_ptr = ptr; in dm_set_mdptr()
2246 void dm_get(struct mapped_device *md) in dm_get() argument
2248 atomic_inc(&md->holders); in dm_get()
2249 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2252 int dm_hold(struct mapped_device *md) in dm_hold() argument
2255 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2259 dm_get(md); in dm_hold()
2265 const char *dm_device_name(struct mapped_device *md) in dm_device_name() argument
2267 return md->name; in dm_device_name()
2271 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy() argument
2279 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); in __dm_destroy()
2280 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2283 blk_set_queue_dying(md->queue); in __dm_destroy()
2289 mutex_lock(&md->suspend_lock); in __dm_destroy()
2290 map = dm_get_live_table(md, &srcu_idx); in __dm_destroy()
2291 if (!dm_suspended_md(md)) { in __dm_destroy()
2293 set_bit(DMF_SUSPENDED, &md->flags); in __dm_destroy()
2294 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_destroy()
2298 dm_put_live_table(md, srcu_idx); in __dm_destroy()
2299 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2308 while (atomic_read(&md->holders)) in __dm_destroy()
2310 else if (atomic_read(&md->holders)) in __dm_destroy()
2312 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2314 dm_sysfs_exit(md); in __dm_destroy()
2315 dm_table_destroy(__unbind(md)); in __dm_destroy()
2316 free_dev(md); in __dm_destroy()
2319 void dm_destroy(struct mapped_device *md) in dm_destroy() argument
2321 __dm_destroy(md, true); in dm_destroy()
2324 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate() argument
2326 __dm_destroy(md, false); in dm_destroy_immediate()
2329 void dm_put(struct mapped_device *md) in dm_put() argument
2331 atomic_dec(&md->holders); in dm_put()
2335 static bool md_in_flight_bios(struct mapped_device *md) in md_in_flight_bios() argument
2338 struct hd_struct *part = &dm_disk(md)->part0; in md_in_flight_bios()
2349 static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) in dm_wait_for_bios_completion() argument
2355 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_bios_completion()
2357 if (!md_in_flight_bios(md)) in dm_wait_for_bios_completion()
2367 finish_wait(&md->wait, &wait); in dm_wait_for_bios_completion()
2374 static int dm_wait_for_completion(struct mapped_device *md, long task_state) in dm_wait_for_completion() argument
2378 if (!queue_is_mq(md->queue)) in dm_wait_for_completion()
2379 return dm_wait_for_bios_completion(md, task_state); in dm_wait_for_completion()
2382 if (!blk_mq_queue_inflight(md->queue)) in dm_wait_for_completion()
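
dm_wait_for_bios_completion() is the classic lost-wakeup-safe wait loop: register on md->wait with prepare_to_wait() before testing the in-flight count, so a completion (which wakes md->wait in end_io_acct()) can never slip between the test and the sleep. A sketch, with the signal check placed as the fragments suggest:

	/* Sketch: wait until no bios are in flight, without losing wakeups. */
	static int sketch_wait_for_bios(struct mapped_device *md, long task_state)
	{
		DEFINE_WAIT(wait);
		int r = 0;

		for (;;) {
			prepare_to_wait(&md->wait, &wait, task_state);
			if (!md_in_flight_bios(md))
				break;
			if (signal_pending_state(task_state, current)) {
				r = -EINTR;   /* interruptible suspend was signalled */
				break;
			}
			io_schedule();
		}
		finish_wait(&md->wait, &wait);
		return r;
	}
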
2401 struct mapped_device *md = container_of(work, struct mapped_device, work); in dm_wq_work() local
2404 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2405 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2406 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2407 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2416 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush() argument
2418 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2420 queue_work(md->wq, &md->work); in dm_queue_flush()
2426 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table() argument
2432 mutex_lock(&md->suspend_lock); in dm_swap_table()
2435 if (!dm_suspended_md(md)) in dm_swap_table()
2445 live_map = dm_get_live_table_fast(md); in dm_swap_table()
2447 limits = md->queue->limits; in dm_swap_table()
2448 dm_put_live_table_fast(md); in dm_swap_table()
2459 map = __bind(md, table, &limits); in dm_swap_table()
2463 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2471 static int lock_fs(struct mapped_device *md) in lock_fs() argument
2475 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); in lock_fs()
2477 r = freeze_bdev(md->bdev); in lock_fs()
2479 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2483 static void unlock_fs(struct mapped_device *md) in unlock_fs() argument
2485 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2487 thaw_bdev(md->bdev); in unlock_fs()
2488 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
2500 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend() argument
2508 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2515 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2517 DMDEBUG("%s: suspending with flush", dm_device_name(md)); in __dm_suspend()
2532 r = lock_fs(md); in __dm_suspend()
2550 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2552 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2558 if (dm_request_based(md)) in __dm_suspend()
2559 dm_stop_queue(md->queue); in __dm_suspend()
2561 flush_workqueue(md->wq); in __dm_suspend()
2568 r = dm_wait_for_completion(md, task_state); in __dm_suspend()
2570 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2573 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2575 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2579 dm_queue_flush(md); in __dm_suspend()
2581 if (dm_request_based(md)) in __dm_suspend()
2582 dm_start_queue(md->queue); in __dm_suspend()
2584 unlock_fs(md); in __dm_suspend()
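
Read top to bottom, __dm_suspend() is a fixed quiescing sequence: mark noflush (or freeze the filesystem via lock_fs()), raise DMF_BLOCK_IO_FOR_SUSPEND and synchronize_srcu() so every submitter observes it, stop the request queue for request-based devices, flush the deferred-IO workqueue, then wait for outstanding IO; failure unwinds in reverse. As an annotated outline (the unwind path is abbreviated to a comment):

	/* Annotated outline of the suspend sequence; unwind path abbreviated. */
	static int sketch_suspend(struct mapped_device *md, struct dm_table *map,
				  unsigned suspend_flags, long task_state,
				  int dmf_suspended_flag)
	{
		bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
		int r;

		if (noflush) {
			set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		} else {
			r = lock_fs(md);                 /* freeze fs: quiesce writers */
			if (r)
				return r;
		}

		set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
		synchronize_srcu(&md->io_barrier);       /* all submitters see the flag */

		if (dm_request_based(md))
			dm_stop_queue(md->queue);
		flush_workqueue(md->wq);                 /* drain deferred bios */

		r = dm_wait_for_completion(md, task_state);
		if (!r) {
			set_bit(dmf_suspended_flag, &md->flags);
			return 0;
		}
		/* Unwind on failure: clear flags, dm_queue_flush(), dm_start_queue(), unlock_fs(). */
		return r;
	}
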
2608 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend() argument
2614 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
2616 if (dm_suspended_md(md)) { in dm_suspend()
2621 if (dm_suspended_internally_md(md)) { in dm_suspend()
2623 mutex_unlock(&md->suspend_lock); in dm_suspend()
2624 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
2630 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
2632 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); in dm_suspend()
2636 set_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2638 clear_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2641 mutex_unlock(&md->suspend_lock); in dm_suspend()
2645 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume() argument
2653 dm_queue_flush(md); in __dm_resume()
2660 if (dm_request_based(md)) in __dm_resume()
2661 dm_start_queue(md->queue); in __dm_resume()
2663 unlock_fs(md); in __dm_resume()
2668 int dm_resume(struct mapped_device *md) in dm_resume() argument
2675 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
2677 if (!dm_suspended_md(md)) in dm_resume()
2680 if (dm_suspended_internally_md(md)) { in dm_resume()
2682 mutex_unlock(&md->suspend_lock); in dm_resume()
2683 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
2689 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
2693 r = __dm_resume(md, map); in dm_resume()
2697 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
2699 mutex_unlock(&md->suspend_lock); in dm_resume()
2710 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend() argument
2714 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
2716 if (md->internal_suspend_count++) in __dm_internal_suspend()
2719 if (dm_suspended_md(md)) { in __dm_internal_suspend()
2720 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
2724 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
2732 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, in __dm_internal_suspend()
2735 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2737 clear_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2740 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume() argument
2742 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
2744 if (--md->internal_suspend_count) in __dm_internal_resume()
2747 if (dm_suspended_md(md)) in __dm_internal_resume()
2754 (void) __dm_resume(md, NULL); in __dm_internal_resume()
2757 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
2759 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
2762 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush() argument
2764 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
2765 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); in dm_internal_suspend_noflush()
2766 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
2770 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume() argument
2772 mutex_lock(&md->suspend_lock); in dm_internal_resume()
2773 __dm_internal_resume(md); in dm_internal_resume()
2774 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
2783 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast() argument
2785 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
2786 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_suspend_fast()
2789 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
2790 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
2791 flush_workqueue(md->wq); in dm_internal_suspend_fast()
2792 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); in dm_internal_suspend_fast()
2796 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast() argument
2798 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_resume_fast()
2801 dm_queue_flush(md); in dm_internal_resume_fast()
2804 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
2811 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent() argument
2822 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); in dm_kobject_uevent()
2826 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, in dm_kobject_uevent()
2835 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq() argument
2837 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
2840 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr() argument
2842 return atomic_read(&md->event_nr); in dm_get_event_nr()
2845 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event() argument
2847 return wait_event_interruptible(md->eventq, in dm_wait_event()
2848 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
2851 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add() argument
2855 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
2856 list_add(elist, &md->uevent_list); in dm_uevent_add()
2857 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
2864 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk() argument
2866 return md->disk; in dm_disk()
2870 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject() argument
2872 return &md->kobj_holder.kobj; in dm_kobject()
2877 struct mapped_device *md; in dm_get_from_kobject() local
2879 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2882 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
2883 md = NULL; in dm_get_from_kobject()
2886 dm_get(md); in dm_get_from_kobject()
2890 return md; in dm_get_from_kobject()
2893 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md() argument
2895 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
2898 static int dm_post_suspending_md(struct mapped_device *md) in dm_post_suspending_md() argument
2900 return test_bit(DMF_POST_SUSPENDING, &md->flags); in dm_post_suspending_md()
2903 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md() argument
2905 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
2908 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag() argument
2910 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
2915 return dm_suspended_md(ti->table->md); in dm_suspended()
2921 return dm_post_suspending_md(ti->table->md); in dm_post_suspending()
2927 return __noflush_suspending(ti->table->md); in dm_noflush_suspending()
2931 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools() argument
2935 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools()
3000 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr() local
3005 table = dm_get_live_table(md, &srcu_idx); in dm_call_pr()
3014 if (dm_suspended_md(md)) { in dm_call_pr()
3025 dm_put_live_table(md, srcu_idx); in dm_call_pr()
3070 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve() local
3074 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_reserve()
3084 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_reserve()
3090 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release() local
3094 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_release()
3104 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_release()
3111 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt() local
3115 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_preempt()
3125 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_preempt()
3131 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local
3135 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_clear()
3145 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_clear()