/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/keyslot-manager.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS 1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
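
/*
 * Illustrative sketch (not part of the original header): the live table
 * referenced by ->map is protected by ->io_barrier (SRCU).  Core code
 * dereferences it via dm_get_live_table()/dm_put_live_table(), roughly:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, submit I/O, etc. ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */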

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_keyslot_manager *ksm;
#endif
};

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
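
/*
 * Illustrative sketch (assumed caller, not part of this header):
 * __dm_get_module_param() reads a user-settable module parameter,
 * substituting a default when it is unset and capping it at a maximum.
 * A wrapper in dm.c is expected to look roughly like:
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *	}
 */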

/*
 * Returns true when there is no room left in a target's status/message
 * @result buffer (of size @maxlen) to append further output.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif