/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_METADATA_H
#define DM_CACHE_METADATA_H

#include "dm-cache-block-types.h"
#include "dm-cache-policy-internal.h"
#include "persistent-data/dm-space-map-metadata.h"

/*----------------------------------------------------------------*/

#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE

/* FIXME: remove this restriction */
/*
 * The metadata device is currently limited in size.
 */
#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
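/*
 * Worked example of the threshold above, assuming the kernel's usual
 * SECTOR_SHIFT of 9 (512-byte sectors):
 *
 *	1024 * 1024 * 1024 >> 9 = 2097152 sectors per GiB
 *	16 * 2097152            = 33554432 sectors, i.e. 16 GiB
 */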

/*----------------------------------------------------------------*/

/*
 * Ext[234]-style compat feature flags.
 *
 * A new feature which old metadata will still be compatible with should
 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
 *
 * A new feature that is not compatible with old code should define a
 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
 * that flag.
 *
 * A new feature that is not compatible with old code accessing the
 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
 * guard the relevant code with that flag.
 *
 * As these various flags are defined they should be added to the
 * following masks.
 */

#define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL
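
/*
 * Illustrative sketch (not part of this header) of how a hypothetical
 * incompatible feature could be wired up, following the scheme described
 * above.  The flag name and bit value are invented for the example.
 *
 *	#define DM_CACHE_FEATURE_INCOMPAT_EXAMPLE	(1UL << 0)
 *
 * and the INCOMPAT mask above would then become:
 *
 *	#define DM_CACHE_FEATURE_INCOMPAT_SUPP	\
 *		(DM_CACHE_FEATURE_INCOMPAT_EXAMPLE)
 *
 * Old code that does not know the EXAMPLE bit would then refuse to open
 * metadata carrying it, which is the point of an INCOMPAT flag.
 */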

struct dm_cache_metadata;

/*
 * Reopens or creates a new, empty metadata volume.  Returns an ERR_PTR on
 * failure.  If reopening, the features must match.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size,
						 unsigned metadata_version);

void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
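
/*
 * Illustrative usage sketch (not part of this header).  metadata_bdev,
 * data_block_size, may_format and policy_hint_size stand for values the
 * caller already has; the metadata version of 2 is only an example.  The
 * important part is the error handling: dm_cache_metadata_open() returns
 * an ERR_PTR on failure, so it must be checked with IS_ERR().
 *
 *	struct dm_cache_metadata *cmd;
 *
 *	cmd = dm_cache_metadata_open(metadata_bdev, data_block_size,
 *				     may_format, policy_hint_size, 2);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	... use the dm_cache_* calls below ...
 *
 *	dm_cache_metadata_close(cmd);
 */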

/*
 * The metadata needs to know how many cache blocks there are.  We don't
 * care about the origin, assuming the core target is giving us valid
 * origin blocks to map to.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);

int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries);

typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
			       dm_dblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context);
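
/*
 * Illustrative sketch (not part of this header) of a load_discard_fn
 * callback.  The context type and the mark_discarded_range() helper are
 * invented for the example; the point is the shape of the callback and how
 * it is handed to dm_cache_load_discards().  Returning non-zero aborts the
 * load.
 *
 *	static int load_discard(void *context, sector_t discard_block_size,
 *				dm_dblock_t dblock, bool discarded)
 *	{
 *		struct discard_load_info *li = context;
 *
 *		if (discarded)
 *			mark_discarded_range(li, discard_block_size, dblock);
 *
 *		return 0;
 *	}
 *
 *	r = dm_cache_load_discards(cmd, load_discard, &li);
 */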

int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);

typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn,
			   void *context);
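
/*
 * Illustrative sketch (not part of this header) of a load_mapping_fn
 * callback.  Apart from the callback signature and dm_cache_load_mappings()
 * itself, everything here is invented for the example: each stored mapping
 * (oblock -> cblock, plus its dirty flag and any saved policy hint) is
 * replayed into the caller's in-core state.
 *
 *	static int load_mapping(void *context, dm_oblock_t oblock,
 *				dm_cblock_t cblock, bool dirty,
 *				uint32_t hint, bool hint_valid)
 *	{
 *		struct my_cache *cache = context;
 *
 *		remember_mapping(cache, oblock, cblock, dirty);
 *		if (hint_valid)
 *			remember_hint(cache, cblock, hint);
 *
 *		return 0;
 *	}
 *
 *	r = dm_cache_load_mappings(cmd, cache->policy, load_mapping, cache);
 */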

int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
			    unsigned nr_bits, unsigned long *bits);

struct dm_cache_statistics {
	uint32_t read_hits;
	uint32_t read_misses;
	uint32_t write_hits;
	uint32_t write_misses;
};

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

/*
 * 'void' because it's no big deal if it fails.
 */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result);

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result);

void dm_cache_dump(struct dm_cache_metadata *cmd);

/*
 * The policy is invited to save a 32-bit hint value for every cblock (e.g.
 * a hit count).  These are stored against the policy name.  If policies
 * are changed, then hints will be lost.  If the machine crashes, hints
 * will be lost.
 *
 * The hints are indexed by cblock, but many policies will not necessarily
 * have a fast way of looking entries up by cblock.  So rather than
 * querying the policy for each cblock, we let it walk its own data
 * structures and fill in the hints in whatever order it wishes.
 */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
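
/*
 * Illustrative shutdown sketch (not part of this header): since hints only
 * survive a clean commit, one plausible place to write them is just before
 * a clean-shutdown commit.  The ordering below is an assumption made for
 * the example, not a requirement stated in this header.
 *
 *	r = dm_cache_write_hints(cmd, policy);
 *	if (r)
 *		return r;
 *
 *	r = dm_cache_commit(cmd, true);
 *
 * where 'true' is the clean_shutdown flag of dm_cache_commit() above.
 */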

/*
 * Query method.  Are all the blocks in the cache clean?
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);

int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_METADATA_H */