xref: /OK3568_Linux_fs/kernel/drivers/md/bcache/bcache.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _BCACHE_H
3*4882a593Smuzhiyun #define _BCACHE_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun /*
6*4882a593Smuzhiyun  * SOME HIGH LEVEL CODE DOCUMENTATION:
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Bcache mostly works with cache sets, cache devices, and backing devices.
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * Support for multiple cache devices hasn't quite been finished off yet, but
11*4882a593Smuzhiyun  * it's about 95% plumbed through. A cache set and its cache devices are sort of
12*4882a593Smuzhiyun  * like an md raid array and its component devices. Most of the code doesn't care
13*4882a593Smuzhiyun  * about individual cache devices; the main abstraction is the cache set.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * Multiple cache devices are intended to give us the ability to mirror dirty
16*4882a593Smuzhiyun  * cached data and metadata, without mirroring clean cached data.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * Backing devices are different, in that they have a lifetime independent of a
19*4882a593Smuzhiyun  * cache set. When you register a newly formatted backing device it'll come up
20*4882a593Smuzhiyun  * in passthrough mode, and then you can attach and detach a backing device from
21*4882a593Smuzhiyun  * a cache set at runtime - while it's mounted and in use. Detaching implicitly
22*4882a593Smuzhiyun  * invalidates any cached data for that backing device.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * A cache set can have multiple (many) backing devices attached to it.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * There's also flash only volumes - this is the reason for the distinction
27*4882a593Smuzhiyun  * between struct cached_dev and struct bcache_device. A flash only volume
28*4882a593Smuzhiyun  * works much like a bcache device that has a backing device, except the
29*4882a593Smuzhiyun  * "cached" data is always dirty. The end result is that we get thin
30*4882a593Smuzhiyun  * provisioning with very little additional code.
31*4882a593Smuzhiyun  *
32*4882a593Smuzhiyun  * Flash only volumes work but they're not production ready because the moving
33*4882a593Smuzhiyun  * garbage collector needs more work. More on that later.
34*4882a593Smuzhiyun  *
35*4882a593Smuzhiyun  * BUCKETS/ALLOCATION:
36*4882a593Smuzhiyun  *
37*4882a593Smuzhiyun  * Bcache is primarily designed for caching, which means that in normal
38*4882a593Smuzhiyun  * operation all of our available space will be allocated. Thus, we need an
39*4882a593Smuzhiyun  * efficient way of deleting things from the cache so we can write new things to
40*4882a593Smuzhiyun  * it.
41*4882a593Smuzhiyun  *
42*4882a593Smuzhiyun  * To do this, we first divide the cache device up into buckets. A bucket is the
43*4882a593Smuzhiyun  * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
44*4882a593Smuzhiyun  * works efficiently.
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
47*4882a593Smuzhiyun  * it. The gens and priorities for all the buckets are stored contiguously and
48*4882a593Smuzhiyun  * packed on disk (in a linked list of buckets - aside from the superblock, all
49*4882a593Smuzhiyun  * of bcache's metadata is stored in buckets).
50*4882a593Smuzhiyun  *
51*4882a593Smuzhiyun  * The priority is used to implement an LRU. We reset a bucket's priority when
52*4882a593Smuzhiyun  * we allocate it or on a cache hit, and every so often we decrement the priority
53*4882a593Smuzhiyun  * of each bucket. It could be used to implement something more sophisticated,
54*4882a593Smuzhiyun  * if anyone ever gets around to it.
55*4882a593Smuzhiyun  *
56*4882a593Smuzhiyun  * The generation is used for invalidating buckets. Each pointer also has an 8
57*4882a593Smuzhiyun  * bit generation embedded in it; for a pointer to be considered valid, its gen
58*4882a593Smuzhiyun  * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
59*4882a593Smuzhiyun  * we have to do is increment its gen (and write its new gen to disk; we batch
60*4882a593Smuzhiyun  * this up).
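 *
 * (In code terms: ptr_stale(), further down in this file, compares the gen
 * embedded in a pointer - PTR_GEN(k, i) - against the gen of the bucket the
 * pointer lands in, via PTR_BUCKET() and gen_after().)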
61*4882a593Smuzhiyun  *
62*4882a593Smuzhiyun  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
63*4882a593Smuzhiyun  * contain metadata (including btree nodes).
64*4882a593Smuzhiyun  *
65*4882a593Smuzhiyun  * THE BTREE:
66*4882a593Smuzhiyun  *
67*4882a593Smuzhiyun  * Bcache is in large part designed around the btree.
68*4882a593Smuzhiyun  *
69*4882a593Smuzhiyun  * At a high level, the btree is just an index of key -> ptr tuples.
70*4882a593Smuzhiyun  *
71*4882a593Smuzhiyun  * Keys represent extents, and thus have a size field. Keys also have a variable
72*4882a593Smuzhiyun  * number of pointers attached to them (potentially zero, which is handy for
73*4882a593Smuzhiyun  * invalidating the cache).
74*4882a593Smuzhiyun  *
75*4882a593Smuzhiyun  * The key itself is an inode:offset pair. The inode number corresponds to a
76*4882a593Smuzhiyun  * backing device or a flash only volume. The offset is the ending offset of the
77*4882a593Smuzhiyun  * extent within the inode - not the starting offset; this makes lookups
78*4882a593Smuzhiyun  * slightly more convenient.
79*4882a593Smuzhiyun  *
80*4882a593Smuzhiyun  * Pointers contain the cache device id, the offset on that device, and an 8 bit
81*4882a593Smuzhiyun  * generation number. More on the gen later.
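 *
 * As a rough sketch, in terms of the accessor macros from <linux/bcache.h>
 * that the rest of this file relies on:
 *
 *	KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k)	- the extent itself
 *	PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i)	- the i'th pointer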
82*4882a593Smuzhiyun  *
83*4882a593Smuzhiyun  * Index lookups are not fully abstracted - cache lookups in particular are
84*4882a593Smuzhiyun  * still somewhat mixed in with the btree code, but things are headed in that
85*4882a593Smuzhiyun  * direction.
86*4882a593Smuzhiyun  *
87*4882a593Smuzhiyun  * Updates are fairly well abstracted, though. There are two different ways of
88*4882a593Smuzhiyun  * updating the btree; insert and replace.
89*4882a593Smuzhiyun  *
90*4882a593Smuzhiyun  * BTREE_INSERT will just take a list of keys and insert them into the btree -
91*4882a593Smuzhiyun  * overwriting (possibly only partially) any extents they overlap with. This is
92*4882a593Smuzhiyun  * used to update the index after a write.
93*4882a593Smuzhiyun  *
94*4882a593Smuzhiyun  * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
95*4882a593Smuzhiyun  * overwriting a key that matches another given key. This is used for inserting
96*4882a593Smuzhiyun  * data into the cache after a cache miss, and for background writeback, and for
97*4882a593Smuzhiyun  * the moving garbage collector.
98*4882a593Smuzhiyun  *
99*4882a593Smuzhiyun  * There is no "delete" operation; deleting things from the index is
100*4882a593Smuzhiyun  * accomplished either by invalidating pointers (by incrementing a bucket's
101*4882a593Smuzhiyun  * gen) or by inserting a key with 0 pointers - which will overwrite anything
102*4882a593Smuzhiyun  * previously present at that location in the index.
103*4882a593Smuzhiyun  *
104*4882a593Smuzhiyun  * This means that there are always stale/invalid keys in the btree. They're
105*4882a593Smuzhiyun  * filtered out by the code that iterates through a btree node, and removed when
106*4882a593Smuzhiyun  * a btree node is rewritten.
107*4882a593Smuzhiyun  *
108*4882a593Smuzhiyun  * BTREE NODES:
109*4882a593Smuzhiyun  *
110*4882a593Smuzhiyun  * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
111*4882a593Smuzhiyun  * free smaller than a bucket - so, that's how big our btree nodes are.
112*4882a593Smuzhiyun  *
113*4882a593Smuzhiyun  * (If buckets are really big we'll only use part of the bucket for a btree node
114*4882a593Smuzhiyun  * - no less than 1/4th - but a bucket still contains no more than a single
115*4882a593Smuzhiyun  * btree node. I'd actually like to change this, but for now we rely on the
116*4882a593Smuzhiyun  * bucket's gen for deleting btree nodes when we rewrite/split a node.)
117*4882a593Smuzhiyun  *
118*4882a593Smuzhiyun  * Anyways, btree nodes are big - big enough to be inefficient with a textbook
119*4882a593Smuzhiyun  * btree implementation.
120*4882a593Smuzhiyun  *
121*4882a593Smuzhiyun  * The way this is solved is that btree nodes are internally log structured; we
122*4882a593Smuzhiyun  * can append new keys to an existing btree node without rewriting it. This
123*4882a593Smuzhiyun  * means each set of keys we write is sorted, but the node is not.
124*4882a593Smuzhiyun  *
125*4882a593Smuzhiyun  * We maintain this log structure in memory - keeping 1MB of keys sorted would
126*4882a593Smuzhiyun  * be expensive, and we have to distinguish between the keys we have written and
127*4882a593Smuzhiyun  * the keys we haven't. So to do a lookup in a btree node, we have to search
128*4882a593Smuzhiyun  * each sorted set. But we do merge written sets together lazily, so the cost of
129*4882a593Smuzhiyun  * these extra searches is quite low (normally most of the keys in a btree node
130*4882a593Smuzhiyun  * will be in one big set, and then there'll be one or two sets that are much
131*4882a593Smuzhiyun  * smaller).
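 *
 * (The sorted sets are the struct bsets managed by bset.c; lookups walk them
 * with a btree_iter, and the lazy merging is done by the sort code behind
 * struct bset_sort_state below.)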
132*4882a593Smuzhiyun  *
133*4882a593Smuzhiyun  * This log structure makes bcache's btree more of a hybrid between a
134*4882a593Smuzhiyun  * conventional btree and a compacting data structure, with some of the
135*4882a593Smuzhiyun  * advantages of both.
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * GARBAGE COLLECTION:
138*4882a593Smuzhiyun  *
139*4882a593Smuzhiyun  * We can't just invalidate any bucket - it might contain dirty data or
140*4882a593Smuzhiyun  * metadata. If it once contained dirty data, other writes might overwrite it
141*4882a593Smuzhiyun  * later, leaving no valid pointers into that bucket in the index.
142*4882a593Smuzhiyun  *
143*4882a593Smuzhiyun  * Thus, the primary purpose of garbage collection is to find buckets to reuse.
144*4882a593Smuzhiyun  * It also counts how much valid data each bucket currently contains, so that
145*4882a593Smuzhiyun  * allocation can reuse buckets sooner when they've been mostly overwritten.
146*4882a593Smuzhiyun  *
147*4882a593Smuzhiyun  * It also does some things that are really internal to the btree
148*4882a593Smuzhiyun  * implementation. If a btree node contains pointers that are stale by more than
149*4882a593Smuzhiyun  * some threshold, it rewrites the btree node to avoid the bucket's generation
150*4882a593Smuzhiyun  * wrapping around. It also merges adjacent btree nodes if they're empty enough.
151*4882a593Smuzhiyun  *
152*4882a593Smuzhiyun  * THE JOURNAL:
153*4882a593Smuzhiyun  *
154*4882a593Smuzhiyun  * Bcache's journal is not necessary for consistency; we always strictly
155*4882a593Smuzhiyun  * order metadata writes so that the btree and everything else is consistent on
156*4882a593Smuzhiyun  * disk in the event of an unclean shutdown, and in fact bcache had writeback
157*4882a593Smuzhiyun  * caching (with recovery from unclean shutdown) before journalling was
158*4882a593Smuzhiyun  * implemented.
159*4882a593Smuzhiyun  *
160*4882a593Smuzhiyun  * Rather, the journal is purely a performance optimization; we can't complete a
161*4882a593Smuzhiyun  * write until we've updated the index on disk, otherwise the cache would be
162*4882a593Smuzhiyun  * inconsistent in the event of an unclean shutdown. This means that without the
163*4882a593Smuzhiyun  * journal, on random write workloads we constantly have to update all the leaf
164*4882a593Smuzhiyun  * nodes in the btree, and those writes will be mostly empty (appending at most
165*4882a593Smuzhiyun  * a few keys each) - highly inefficient in terms of amount of metadata writes,
166*4882a593Smuzhiyun  * and it puts more strain on the various btree resorting/compacting code.
167*4882a593Smuzhiyun  *
168*4882a593Smuzhiyun  * The journal is just a log of keys we've inserted; on startup we just reinsert
169*4882a593Smuzhiyun  * all the keys in the open journal entries. That means that when we're updating
170*4882a593Smuzhiyun  * a node in the btree, we can wait until a 4k block of keys fills up before
171*4882a593Smuzhiyun  * writing them out.
172*4882a593Smuzhiyun  *
173*4882a593Smuzhiyun  * For simplicity, we only journal updates to leaf nodes; updates to parent
174*4882a593Smuzhiyun  * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
175*4882a593Smuzhiyun  * the complexity to deal with journalling them (in particular, journal replay)
176*4882a593Smuzhiyun  * - updates to non leaf nodes just happen synchronously (see btree_split()).
177*4882a593Smuzhiyun  */
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun #define pr_fmt(fmt) "bcache: %s() " fmt, __func__
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun #include <linux/bcache.h>
182*4882a593Smuzhiyun #include <linux/bio.h>
183*4882a593Smuzhiyun #include <linux/kobject.h>
184*4882a593Smuzhiyun #include <linux/list.h>
185*4882a593Smuzhiyun #include <linux/mutex.h>
186*4882a593Smuzhiyun #include <linux/rbtree.h>
187*4882a593Smuzhiyun #include <linux/rwsem.h>
188*4882a593Smuzhiyun #include <linux/refcount.h>
189*4882a593Smuzhiyun #include <linux/types.h>
190*4882a593Smuzhiyun #include <linux/workqueue.h>
191*4882a593Smuzhiyun #include <linux/kthread.h>
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun #include "bset.h"
194*4882a593Smuzhiyun #include "util.h"
195*4882a593Smuzhiyun #include "closure.h"
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun struct bucket {
198*4882a593Smuzhiyun 	atomic_t	pin;
199*4882a593Smuzhiyun 	uint16_t	prio;
200*4882a593Smuzhiyun 	uint8_t		gen;
201*4882a593Smuzhiyun 	uint8_t		last_gc; /* Most out of date gen in the btree */
202*4882a593Smuzhiyun 	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
203*4882a593Smuzhiyun };
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun /*
206*4882a593Smuzhiyun  * I'd use bitfields for these, but I don't trust the compiler not to screw me
207*4882a593Smuzhiyun  * as multiple threads touch struct bucket without locking
208*4882a593Smuzhiyun  */
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
211*4882a593Smuzhiyun #define GC_MARK_RECLAIMABLE	1
212*4882a593Smuzhiyun #define GC_MARK_DIRTY		2
213*4882a593Smuzhiyun #define GC_MARK_METADATA	3
214*4882a593Smuzhiyun #define GC_SECTORS_USED_SIZE	13
215*4882a593Smuzhiyun #define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
216*4882a593Smuzhiyun BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
217*4882a593Smuzhiyun BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
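
/*
 * BITMASK() generates GC_MARK(b)/SET_GC_MARK(b, v) style accessors for the
 * ranges declared above. A minimal sketch of marking a bucket that holds
 * metadata ("sectors" here is just whatever the key being marked covers):
 *
 *	SET_GC_MARK(b, GC_MARK_METADATA);
 *	SET_GC_SECTORS_USED(b, min_t(unsigned int,
 *				     GC_SECTORS_USED(b) + sectors,
 *				     MAX_GC_SECTORS_USED));
 */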
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun #include "journal.h"
220*4882a593Smuzhiyun #include "stats.h"
221*4882a593Smuzhiyun struct search;
222*4882a593Smuzhiyun struct btree;
223*4882a593Smuzhiyun struct keybuf;
224*4882a593Smuzhiyun 
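/*
 * A single key sitting in a keybuf - the refill buffer used by background
 * writeback (writeback_keys) and the moving garbage collector
 * (moving_gc_keys) - kept in an rb tree ordered by key.
 */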
225*4882a593Smuzhiyun struct keybuf_key {
226*4882a593Smuzhiyun 	struct rb_node		node;
227*4882a593Smuzhiyun 	BKEY_PADDED(key);
228*4882a593Smuzhiyun 	void			*private;
229*4882a593Smuzhiyun };
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun struct keybuf {
232*4882a593Smuzhiyun 	struct bkey		last_scanned;
233*4882a593Smuzhiyun 	spinlock_t		lock;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	/*
236*4882a593Smuzhiyun 	 * Beginning and end of range in rb tree - so that we can skip taking
237*4882a593Smuzhiyun 	 * lock and checking the rb tree when we need to check for overlapping
238*4882a593Smuzhiyun 	 * keys.
239*4882a593Smuzhiyun 	 */
240*4882a593Smuzhiyun 	struct bkey		start;
241*4882a593Smuzhiyun 	struct bkey		end;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	struct rb_root		keys;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun #define KEYBUF_NR		500
246*4882a593Smuzhiyun 	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
247*4882a593Smuzhiyun };
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun struct bcache_device {
250*4882a593Smuzhiyun 	struct closure		cl;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	struct kobject		kobj;
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	struct cache_set	*c;
255*4882a593Smuzhiyun 	unsigned int		id;
256*4882a593Smuzhiyun #define BCACHEDEVNAME_SIZE	12
257*4882a593Smuzhiyun 	char			name[BCACHEDEVNAME_SIZE];
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	struct gendisk		*disk;
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	unsigned long		flags;
262*4882a593Smuzhiyun #define BCACHE_DEV_CLOSING		0
263*4882a593Smuzhiyun #define BCACHE_DEV_DETACHING		1
264*4882a593Smuzhiyun #define BCACHE_DEV_UNLINK_DONE		2
265*4882a593Smuzhiyun #define BCACHE_DEV_WB_RUNNING		3
266*4882a593Smuzhiyun #define BCACHE_DEV_RATE_DW_RUNNING	4
267*4882a593Smuzhiyun 	int			nr_stripes;
268*4882a593Smuzhiyun 	unsigned int		stripe_size;
269*4882a593Smuzhiyun 	atomic_t		*stripe_sectors_dirty;
270*4882a593Smuzhiyun 	unsigned long		*full_dirty_stripes;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	struct bio_set		bio_split;
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	unsigned int		data_csum:1;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	int (*cache_miss)(struct btree *b, struct search *s,
277*4882a593Smuzhiyun 			  struct bio *bio, unsigned int sectors);
278*4882a593Smuzhiyun 	int (*ioctl)(struct bcache_device *d, fmode_t mode,
279*4882a593Smuzhiyun 		     unsigned int cmd, unsigned long arg);
280*4882a593Smuzhiyun };
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun struct io {
283*4882a593Smuzhiyun 	/* Used to track sequential IO so it can be skipped */
284*4882a593Smuzhiyun 	struct hlist_node	hash;
285*4882a593Smuzhiyun 	struct list_head	lru;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	unsigned long		jiffies;
288*4882a593Smuzhiyun 	unsigned int		sequential;
289*4882a593Smuzhiyun 	sector_t		last;
290*4882a593Smuzhiyun };
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun enum stop_on_failure {
293*4882a593Smuzhiyun 	BCH_CACHED_DEV_STOP_AUTO = 0,
294*4882a593Smuzhiyun 	BCH_CACHED_DEV_STOP_ALWAYS,
295*4882a593Smuzhiyun 	BCH_CACHED_DEV_STOP_MODE_MAX,
296*4882a593Smuzhiyun };
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun struct cached_dev {
299*4882a593Smuzhiyun 	struct list_head	list;
300*4882a593Smuzhiyun 	struct bcache_device	disk;
301*4882a593Smuzhiyun 	struct block_device	*bdev;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	struct cache_sb		sb;
304*4882a593Smuzhiyun 	struct cache_sb_disk	*sb_disk;
305*4882a593Smuzhiyun 	struct bio		sb_bio;
306*4882a593Smuzhiyun 	struct bio_vec		sb_bv[1];
307*4882a593Smuzhiyun 	struct closure		sb_write;
308*4882a593Smuzhiyun 	struct semaphore	sb_write_mutex;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	/* Refcount on the cache set. Always nonzero when we're caching. */
311*4882a593Smuzhiyun 	refcount_t		count;
312*4882a593Smuzhiyun 	struct work_struct	detach;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	/*
315*4882a593Smuzhiyun 	 * Device might not be running if it's dirty and the cache set hasn't
316*4882a593Smuzhiyun 	 * shown up yet.
317*4882a593Smuzhiyun 	 */
318*4882a593Smuzhiyun 	atomic_t		running;
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	/*
321*4882a593Smuzhiyun 	 * Writes take a shared lock from start to finish; scanning for dirty
322*4882a593Smuzhiyun 	 * data to refill the rb tree requires an exclusive lock.
323*4882a593Smuzhiyun 	 */
324*4882a593Smuzhiyun 	struct rw_semaphore	writeback_lock;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	/*
327*4882a593Smuzhiyun 	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
328*4882a593Smuzhiyun 	 * data in the cache. Protected by writeback_lock; must have an
329*4882a593Smuzhiyun 	 * data in the cache. Protected by writeback_lock; must have a
330*4882a593Smuzhiyun 	 */
331*4882a593Smuzhiyun 	atomic_t		has_dirty;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun #define BCH_CACHE_READA_ALL		0
334*4882a593Smuzhiyun #define BCH_CACHE_READA_META_ONLY	1
335*4882a593Smuzhiyun 	unsigned int		cache_readahead_policy;
336*4882a593Smuzhiyun 	struct bch_ratelimit	writeback_rate;
337*4882a593Smuzhiyun 	struct delayed_work	writeback_rate_update;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	/* Limit number of writeback bios in flight */
340*4882a593Smuzhiyun 	struct semaphore	in_flight;
341*4882a593Smuzhiyun 	struct task_struct	*writeback_thread;
342*4882a593Smuzhiyun 	struct workqueue_struct	*writeback_write_wq;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	struct keybuf		writeback_keys;
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	struct task_struct	*status_update_thread;
347*4882a593Smuzhiyun 	/*
348*4882a593Smuzhiyun 	 * Order the write-half of writeback operations strongly in dispatch
349*4882a593Smuzhiyun 	 * order.  (Maintain LBA order; don't allow reads completing out of
350*4882a593Smuzhiyun 	 * order to re-order the writes...)
351*4882a593Smuzhiyun 	 */
352*4882a593Smuzhiyun 	struct closure_waitlist writeback_ordering_wait;
353*4882a593Smuzhiyun 	atomic_t		writeback_sequence_next;
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	/* For tracking sequential IO */
356*4882a593Smuzhiyun #define RECENT_IO_BITS	7
357*4882a593Smuzhiyun #define RECENT_IO	(1 << RECENT_IO_BITS)
358*4882a593Smuzhiyun 	struct io		io[RECENT_IO];
359*4882a593Smuzhiyun 	struct hlist_head	io_hash[RECENT_IO + 1];
360*4882a593Smuzhiyun 	struct list_head	io_lru;
361*4882a593Smuzhiyun 	spinlock_t		io_lock;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	struct cache_accounting	accounting;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	/* The rest of this all shows up in sysfs */
366*4882a593Smuzhiyun 	unsigned int		sequential_cutoff;
367*4882a593Smuzhiyun 	unsigned int		readahead;
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	unsigned int		io_disable:1;
370*4882a593Smuzhiyun 	unsigned int		verify:1;
371*4882a593Smuzhiyun 	unsigned int		bypass_torture_test:1;
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 	unsigned int		partial_stripes_expensive:1;
374*4882a593Smuzhiyun 	unsigned int		writeback_metadata:1;
375*4882a593Smuzhiyun 	unsigned int		writeback_running:1;
376*4882a593Smuzhiyun 	unsigned char		writeback_percent;
377*4882a593Smuzhiyun 	unsigned int		writeback_delay;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	uint64_t		writeback_rate_target;
380*4882a593Smuzhiyun 	int64_t			writeback_rate_proportional;
381*4882a593Smuzhiyun 	int64_t			writeback_rate_integral;
382*4882a593Smuzhiyun 	int64_t			writeback_rate_integral_scaled;
383*4882a593Smuzhiyun 	int32_t			writeback_rate_change;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	unsigned int		writeback_rate_update_seconds;
386*4882a593Smuzhiyun 	unsigned int		writeback_rate_i_term_inverse;
387*4882a593Smuzhiyun 	unsigned int		writeback_rate_p_term_inverse;
388*4882a593Smuzhiyun 	unsigned int		writeback_rate_minimum;
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	enum stop_on_failure	stop_when_cache_set_failed;
391*4882a593Smuzhiyun #define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
392*4882a593Smuzhiyun 	atomic_t		io_errors;
393*4882a593Smuzhiyun 	unsigned int		error_limit;
394*4882a593Smuzhiyun 	unsigned int		offline_seconds;
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	char			backing_dev_name[BDEVNAME_SIZE];
397*4882a593Smuzhiyun };
398*4882a593Smuzhiyun 
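/*
 * Separate freelists (see free[RESERVE_NR] in struct cache below) so that
 * btree node allocation, prio/gen writes and moving gc always have a few
 * buckets in reserve and never have to wait behind normal data allocations;
 * RESERVE_NONE is the general-purpose list.
 */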
399*4882a593Smuzhiyun enum alloc_reserve {
400*4882a593Smuzhiyun 	RESERVE_BTREE,
401*4882a593Smuzhiyun 	RESERVE_PRIO,
402*4882a593Smuzhiyun 	RESERVE_MOVINGGC,
403*4882a593Smuzhiyun 	RESERVE_NONE,
404*4882a593Smuzhiyun 	RESERVE_NR,
405*4882a593Smuzhiyun };
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun struct cache {
408*4882a593Smuzhiyun 	struct cache_set	*set;
409*4882a593Smuzhiyun 	struct cache_sb		sb;
410*4882a593Smuzhiyun 	struct cache_sb_disk	*sb_disk;
411*4882a593Smuzhiyun 	struct bio		sb_bio;
412*4882a593Smuzhiyun 	struct bio_vec		sb_bv[1];
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	struct kobject		kobj;
415*4882a593Smuzhiyun 	struct block_device	*bdev;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	struct task_struct	*alloc_thread;
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	struct closure		prio;
420*4882a593Smuzhiyun 	struct prio_set		*disk_buckets;
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	/*
423*4882a593Smuzhiyun 	 * When allocating new buckets, prio_write() gets first dibs - since we
424*4882a593Smuzhiyun 	 * may not be able to allocate at all without writing priorities and gens.
425*4882a593Smuzhiyun 	 * prio_last_buckets[] contains the last buckets we wrote priorities to
426*4882a593Smuzhiyun 	 * (so gc can mark them as metadata), prio_buckets[] contains the
427*4882a593Smuzhiyun 	 * buckets allocated for the next prio write.
428*4882a593Smuzhiyun 	 */
429*4882a593Smuzhiyun 	uint64_t		*prio_buckets;
430*4882a593Smuzhiyun 	uint64_t		*prio_last_buckets;
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	/*
433*4882a593Smuzhiyun 	 * free: Buckets that are ready to be used
434*4882a593Smuzhiyun 	 *
435*4882a593Smuzhiyun 	 * free_inc: Incoming buckets - these are buckets that currently have
436*4882a593Smuzhiyun 	 * cached data in them, and we can't reuse them until after we write
437*4882a593Smuzhiyun 	 * their new gen to disk. After prio_write() finishes writing the new
438*4882a593Smuzhiyun 	 * gens/prios, they'll be moved to the free list (and possibly discarded
439*4882a593Smuzhiyun 	 * in the process)
440*4882a593Smuzhiyun 	 */
441*4882a593Smuzhiyun 	DECLARE_FIFO(long, free)[RESERVE_NR];
442*4882a593Smuzhiyun 	DECLARE_FIFO(long, free_inc);
443*4882a593Smuzhiyun 
444*4882a593Smuzhiyun 	size_t			fifo_last_bucket;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	/* Allocation stuff: */
447*4882a593Smuzhiyun 	struct bucket		*buckets;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	DECLARE_HEAP(struct bucket *, heap);
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	/*
452*4882a593Smuzhiyun 	 * If nonzero, we know we aren't going to find any buckets to invalidate
453*4882a593Smuzhiyun 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
454*4882a593Smuzhiyun 	 * cpu
455*4882a593Smuzhiyun 	 */
456*4882a593Smuzhiyun 	unsigned int		invalidate_needs_gc;
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	bool			discard; /* Get rid of? */
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 	struct journal_device	journal;
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun 	/* The rest of this all shows up in sysfs */
463*4882a593Smuzhiyun #define IO_ERROR_SHIFT		20
464*4882a593Smuzhiyun 	atomic_t		io_errors;
465*4882a593Smuzhiyun 	atomic_t		io_count;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	atomic_long_t		meta_sectors_written;
468*4882a593Smuzhiyun 	atomic_long_t		btree_sectors_written;
469*4882a593Smuzhiyun 	atomic_long_t		sectors_written;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	char			cache_dev_name[BDEVNAME_SIZE];
472*4882a593Smuzhiyun };
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun struct gc_stat {
475*4882a593Smuzhiyun 	size_t			nodes;
476*4882a593Smuzhiyun 	size_t			nodes_pre;
477*4882a593Smuzhiyun 	size_t			key_bytes;
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 	size_t			nkeys;
480*4882a593Smuzhiyun 	uint64_t		data;	/* sectors */
481*4882a593Smuzhiyun 	unsigned int		in_use; /* percent */
482*4882a593Smuzhiyun };
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun /*
485*4882a593Smuzhiyun  * Flag bits, for how the cache set is shutting down, and what phase it's at:
486*4882a593Smuzhiyun  *
487*4882a593Smuzhiyun  * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
488*4882a593Smuzhiyun  * all the backing devices first (their cached data gets invalidated, and they
489*4882a593Smuzhiyun  * won't automatically reattach).
490*4882a593Smuzhiyun  *
491*4882a593Smuzhiyun  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
492*4882a593Smuzhiyun  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
493*4882a593Smuzhiyun  * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
494*4882a593Smuzhiyun  *
495*4882a593Smuzhiyun  * CACHE_SET_RUNNING means all cache devices have been registered and journal
496*4882a593Smuzhiyun  * replay is complete.
497*4882a593Smuzhiyun  *
498*4882a593Smuzhiyun  * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
499*4882a593Smuzhiyun  * external and internal I/O should be denied while this flag is set.
500*4882a593Smuzhiyun  *
501*4882a593Smuzhiyun  */
502*4882a593Smuzhiyun #define CACHE_SET_UNREGISTERING		0
503*4882a593Smuzhiyun #define	CACHE_SET_STOPPING		1
504*4882a593Smuzhiyun #define	CACHE_SET_RUNNING		2
505*4882a593Smuzhiyun #define CACHE_SET_IO_DISABLE		3
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun struct cache_set {
508*4882a593Smuzhiyun 	struct closure		cl;
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	struct list_head	list;
511*4882a593Smuzhiyun 	struct kobject		kobj;
512*4882a593Smuzhiyun 	struct kobject		internal;
513*4882a593Smuzhiyun 	struct dentry		*debug;
514*4882a593Smuzhiyun 	struct cache_accounting accounting;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	unsigned long		flags;
517*4882a593Smuzhiyun 	atomic_t		idle_counter;
518*4882a593Smuzhiyun 	atomic_t		at_max_writeback_rate;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	struct cache		*cache;
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	struct bcache_device	**devices;
523*4882a593Smuzhiyun 	unsigned int		devices_max_used;
524*4882a593Smuzhiyun 	atomic_t		attached_dev_nr;
525*4882a593Smuzhiyun 	struct list_head	cached_devs;
526*4882a593Smuzhiyun 	uint64_t		cached_dev_sectors;
527*4882a593Smuzhiyun 	atomic_long_t		flash_dev_dirty_sectors;
528*4882a593Smuzhiyun 	struct closure		caching;
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 	struct closure		sb_write;
531*4882a593Smuzhiyun 	struct semaphore	sb_write_mutex;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	mempool_t		search;
534*4882a593Smuzhiyun 	mempool_t		bio_meta;
535*4882a593Smuzhiyun 	struct bio_set		bio_split;
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	/* For the btree cache */
538*4882a593Smuzhiyun 	struct shrinker		shrink;
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	/* For the btree cache and anything allocation related */
541*4882a593Smuzhiyun 	struct mutex		bucket_lock;
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	/* log2(bucket_size), in sectors */
544*4882a593Smuzhiyun 	unsigned short		bucket_bits;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 	/* log2(block_size), in sectors */
547*4882a593Smuzhiyun 	unsigned short		block_bits;
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 	/*
550*4882a593Smuzhiyun 	 * Default number of pages for a new btree node - may be less than a
551*4882a593Smuzhiyun 	 * full bucket
552*4882a593Smuzhiyun 	 */
553*4882a593Smuzhiyun 	unsigned int		btree_pages;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	/*
556*4882a593Smuzhiyun 	 * Lists of struct btrees; lru is the list for structs that have memory
557*4882a593Smuzhiyun 	 * allocated for an actual btree node; freed is for structs that do not.
558*4882a593Smuzhiyun 	 *
559*4882a593Smuzhiyun 	 * We never free a struct btree, except on shutdown - we just put it on
560*4882a593Smuzhiyun 	 * the btree_cache_freed list and reuse it later. This simplifies the
561*4882a593Smuzhiyun 	 * code, and it doesn't cost us much memory as the memory usage is
562*4882a593Smuzhiyun 	 * dominated by buffers that hold the actual btree node data and those
563*4882a593Smuzhiyun 	 * can be freed - and the number of struct btrees allocated is
564*4882a593Smuzhiyun 	 * effectively bounded.
565*4882a593Smuzhiyun 	 *
566*4882a593Smuzhiyun 	 * btree_cache_freeable effectively is a small cache - we use it because
567*4882a593Smuzhiyun 	 * high order page allocations can be rather expensive, and it's quite
568*4882a593Smuzhiyun 	 * common to delete and allocate btree nodes in quick succession. It
569*4882a593Smuzhiyun 	 * should never grow past ~2-3 nodes in practice.
570*4882a593Smuzhiyun 	 */
571*4882a593Smuzhiyun 	struct list_head	btree_cache;
572*4882a593Smuzhiyun 	struct list_head	btree_cache_freeable;
573*4882a593Smuzhiyun 	struct list_head	btree_cache_freed;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	/* Number of elements in btree_cache + btree_cache_freeable lists */
576*4882a593Smuzhiyun 	unsigned int		btree_cache_used;
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	/*
579*4882a593Smuzhiyun 	 * If we need to allocate memory for a new btree node and that
580*4882a593Smuzhiyun 	 * allocation fails, we can cannibalize another node in the btree cache
581*4882a593Smuzhiyun 	 * to satisfy the allocation - lock to guarantee only one thread does
582*4882a593Smuzhiyun 	 * this at a time:
583*4882a593Smuzhiyun 	 */
584*4882a593Smuzhiyun 	wait_queue_head_t	btree_cache_wait;
585*4882a593Smuzhiyun 	struct task_struct	*btree_cache_alloc_lock;
586*4882a593Smuzhiyun 	spinlock_t		btree_cannibalize_lock;
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	/*
589*4882a593Smuzhiyun 	 * When we free a btree node, we increment the gen of the bucket the
590*4882a593Smuzhiyun 	 * node is in - but we can't rewrite the prios and gens until we've
591*4882a593Smuzhiyun 	 * finished whatever it is we were doing; otherwise, after a crash, the
592*4882a593Smuzhiyun 	 * btree node would be freed but - for, say, a split - we might not yet
593*4882a593Smuzhiyun 	 * have the pointers to the new nodes inserted into the btree.
594*4882a593Smuzhiyun 	 *
595*4882a593Smuzhiyun 	 * This is a refcount that blocks prio_write() until the new keys are
596*4882a593Smuzhiyun 	 * written.
597*4882a593Smuzhiyun 	 */
598*4882a593Smuzhiyun 	atomic_t		prio_blocked;
599*4882a593Smuzhiyun 	wait_queue_head_t	bucket_wait;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	/*
602*4882a593Smuzhiyun 	 * For any bio we don't skip we subtract the number of sectors from
603*4882a593Smuzhiyun 	 * rescale; when it hits 0 we rescale all the bucket priorities.
604*4882a593Smuzhiyun 	 */
605*4882a593Smuzhiyun 	atomic_t		rescale;
606*4882a593Smuzhiyun 	/*
607*4882a593Smuzhiyun 	 * Used by GC to identify whether any front-side I/O is in flight
608*4882a593Smuzhiyun 	 */
609*4882a593Smuzhiyun 	atomic_t		search_inflight;
610*4882a593Smuzhiyun 	/*
611*4882a593Smuzhiyun 	 * When we invalidate buckets, we use both the priority and the amount
612*4882a593Smuzhiyun 	 * of good data to determine which buckets to reuse first - to weight
613*4882a593Smuzhiyun 	 * those together consistently we keep track of the smallest nonzero
614*4882a593Smuzhiyun 	 * priority of any bucket.
615*4882a593Smuzhiyun 	 */
616*4882a593Smuzhiyun 	uint16_t		min_prio;
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 	/*
619*4882a593Smuzhiyun 	 * max(gen - last_gc) for all buckets. When it gets too big we have to
620*4882a593Smuzhiyun 	 * gc to keep gens from wrapping around.
621*4882a593Smuzhiyun 	 */
622*4882a593Smuzhiyun 	uint8_t			need_gc;
623*4882a593Smuzhiyun 	struct gc_stat		gc_stats;
624*4882a593Smuzhiyun 	size_t			nbuckets;
625*4882a593Smuzhiyun 	size_t			avail_nbuckets;
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 	struct task_struct	*gc_thread;
628*4882a593Smuzhiyun 	/* Where in the btree gc currently is */
629*4882a593Smuzhiyun 	struct bkey		gc_done;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	/*
632*4882a593Smuzhiyun 	 * For automatic garbage collection after writeback completes, this
633*4882a593Smuzhiyun 	 * variable is used as a bit field:
634*4882a593Smuzhiyun 	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
635*4882a593Smuzhiyun 	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
636*4882a593Smuzhiyun 	 * This is an optimization for write requests that arrive soon after
637*4882a593Smuzhiyun 	 * writeback finishes, when the read hit rate would otherwise drop
638*4882a593Smuzhiyun 	 * because clean data in the cache has been discarded. Unless the user
639*4882a593Smuzhiyun 	 * explicitly enables it via sysfs, it won't be enabled.
640*4882a593Smuzhiyun 	 */
641*4882a593Smuzhiyun #define BCH_ENABLE_AUTO_GC	1
642*4882a593Smuzhiyun #define BCH_DO_AUTO_GC		2
643*4882a593Smuzhiyun 	uint8_t			gc_after_writeback;
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 	/*
646*4882a593Smuzhiyun 	 * The allocation code needs gc_mark in struct bucket to be correct, but
647*4882a593Smuzhiyun 	 * it's not while a gc is in progress. Protected by bucket_lock.
648*4882a593Smuzhiyun 	 */
649*4882a593Smuzhiyun 	int			gc_mark_valid;
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 	/* Counts how many sectors bio_insert has added to the cache */
652*4882a593Smuzhiyun 	atomic_t		sectors_to_gc;
653*4882a593Smuzhiyun 	wait_queue_head_t	gc_wait;
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	struct keybuf		moving_gc_keys;
656*4882a593Smuzhiyun 	/* Number of moving GC bios in flight */
657*4882a593Smuzhiyun 	struct semaphore	moving_in_flight;
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	struct workqueue_struct	*moving_gc_wq;
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun 	struct btree		*root;
662*4882a593Smuzhiyun 
663*4882a593Smuzhiyun #ifdef CONFIG_BCACHE_DEBUG
664*4882a593Smuzhiyun 	struct btree		*verify_data;
665*4882a593Smuzhiyun 	struct bset		*verify_ondisk;
666*4882a593Smuzhiyun 	struct mutex		verify_lock;
667*4882a593Smuzhiyun #endif
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	uint8_t			set_uuid[16];
670*4882a593Smuzhiyun 	unsigned int		nr_uuids;
671*4882a593Smuzhiyun 	struct uuid_entry	*uuids;
672*4882a593Smuzhiyun 	BKEY_PADDED(uuid_bucket);
673*4882a593Smuzhiyun 	struct closure		uuid_write;
674*4882a593Smuzhiyun 	struct semaphore	uuid_write_mutex;
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	/*
677*4882a593Smuzhiyun 	 * A btree node on disk could have too many bsets for an iterator to fit
678*4882a593Smuzhiyun 	 * on the stack - so we have to dynamically allocate them.
679*4882a593Smuzhiyun 	 * bch_cache_set_alloc() makes sure the pool can allocate iterators
680*4882a593Smuzhiyun 	 * with enough room to host
681*4882a593Smuzhiyun 	 *     (sb.bucket_size / sb.block_size)
682*4882a593Smuzhiyun 	 * btree_iter_sets, which is more than the static MAX_BSETS.
683*4882a593Smuzhiyun 	 */
684*4882a593Smuzhiyun 	mempool_t		fill_iter;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	struct bset_sort_state	sort;
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 	/* List of buckets we're currently writing data to */
689*4882a593Smuzhiyun 	struct list_head	data_buckets;
690*4882a593Smuzhiyun 	spinlock_t		data_bucket_lock;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	struct journal		journal;
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun #define CONGESTED_MAX		1024
695*4882a593Smuzhiyun 	unsigned int		congested_last_us;
696*4882a593Smuzhiyun 	atomic_t		congested;
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	/* The rest of this all shows up in sysfs */
699*4882a593Smuzhiyun 	unsigned int		congested_read_threshold_us;
700*4882a593Smuzhiyun 	unsigned int		congested_write_threshold_us;
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 	struct time_stats	btree_gc_time;
703*4882a593Smuzhiyun 	struct time_stats	btree_split_time;
704*4882a593Smuzhiyun 	struct time_stats	btree_read_time;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	atomic_long_t		cache_read_races;
707*4882a593Smuzhiyun 	atomic_long_t		writeback_keys_done;
708*4882a593Smuzhiyun 	atomic_long_t		writeback_keys_failed;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	atomic_long_t		reclaim;
711*4882a593Smuzhiyun 	atomic_long_t		reclaimed_journal_buckets;
712*4882a593Smuzhiyun 	atomic_long_t		flush_write;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	enum			{
715*4882a593Smuzhiyun 		ON_ERROR_UNREGISTER,
716*4882a593Smuzhiyun 		ON_ERROR_PANIC,
717*4882a593Smuzhiyun 	}			on_error;
718*4882a593Smuzhiyun #define DEFAULT_IO_ERROR_LIMIT 8
719*4882a593Smuzhiyun 	unsigned int		error_limit;
720*4882a593Smuzhiyun 	unsigned int		error_decay;
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	unsigned short		journal_delay_ms;
723*4882a593Smuzhiyun 	bool			expensive_debug_checks;
724*4882a593Smuzhiyun 	unsigned int		verify:1;
725*4882a593Smuzhiyun 	unsigned int		key_merging_disabled:1;
726*4882a593Smuzhiyun 	unsigned int		gc_always_rewrite:1;
727*4882a593Smuzhiyun 	unsigned int		shrinker_disabled:1;
728*4882a593Smuzhiyun 	unsigned int		copy_gc_enabled:1;
729*4882a593Smuzhiyun 	unsigned int		idle_max_writeback_rate_enabled:1;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun #define BUCKET_HASH_BITS	12
732*4882a593Smuzhiyun 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
733*4882a593Smuzhiyun };
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun struct bbio {
736*4882a593Smuzhiyun 	unsigned int		submit_time_us;
737*4882a593Smuzhiyun 	union {
738*4882a593Smuzhiyun 		struct bkey	key;
739*4882a593Smuzhiyun 		uint64_t	_pad[3];
740*4882a593Smuzhiyun 		/*
741*4882a593Smuzhiyun 		 * We only need pad = 3 here because we only ever carry around a
742*4882a593Smuzhiyun 		 * single pointer - i.e. the pointer we're doing io to/from.
743*4882a593Smuzhiyun 		 */
744*4882a593Smuzhiyun 	};
745*4882a593Smuzhiyun 	struct bio		bio;
746*4882a593Smuzhiyun };
747*4882a593Smuzhiyun 
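/*
 * Btree/metadata buckets are kept at the maximum priority; freshly allocated
 * data buckets start at INITIAL_PRIO and are decayed over time by
 * bch_rescale_priorities() to implement the LRU described at the top of this
 * file.
 */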
748*4882a593Smuzhiyun #define BTREE_PRIO		USHRT_MAX
749*4882a593Smuzhiyun #define INITIAL_PRIO		32768U
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun #define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
752*4882a593Smuzhiyun #define btree_blocks(b)							\
753*4882a593Smuzhiyun 	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun #define btree_default_blocks(c)						\
756*4882a593Smuzhiyun 	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun #define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
759*4882a593Smuzhiyun #define block_bytes(ca)		((ca)->sb.block_size << 9)
760*4882a593Smuzhiyun 
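/*
 * Pages backing the in-memory copy of a metadata bucket (prios, journal,
 * etc.). For very large buckets this is clamped so that a single allocation
 * stays within what the page allocator can provide.
 */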
761*4882a593Smuzhiyun static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
762*4882a593Smuzhiyun {
763*4882a593Smuzhiyun 	unsigned int n, max_pages;
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	max_pages = min_t(unsigned int,
766*4882a593Smuzhiyun 			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
767*4882a593Smuzhiyun 			  MAX_ORDER_NR_PAGES);
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	n = sb->bucket_size / PAGE_SECTORS;
770*4882a593Smuzhiyun 	if (n > max_pages)
771*4882a593Smuzhiyun 		n = max_pages;
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	return n;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
777*4882a593Smuzhiyun {
778*4882a593Smuzhiyun 	return meta_bucket_pages(sb) << PAGE_SHIFT;
779*4882a593Smuzhiyun }
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun #define prios_per_bucket(ca)						\
782*4882a593Smuzhiyun 	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
783*4882a593Smuzhiyun 	 sizeof(struct bucket_disk))
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun #define prio_buckets(ca)						\
786*4882a593Smuzhiyun 	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
789*4882a593Smuzhiyun {
790*4882a593Smuzhiyun 	return s >> c->bucket_bits;
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
794*4882a593Smuzhiyun {
795*4882a593Smuzhiyun 	return ((sector_t) b) << c->bucket_bits;
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun 	return s & (c->cache->sb.bucket_size - 1);
801*4882a593Smuzhiyun }
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun static inline struct cache *PTR_CACHE(struct cache_set *c,
804*4882a593Smuzhiyun 				      const struct bkey *k,
805*4882a593Smuzhiyun 				      unsigned int ptr)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun 	return c->cache;
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun static inline size_t PTR_BUCKET_NR(struct cache_set *c,
811*4882a593Smuzhiyun 				   const struct bkey *k,
812*4882a593Smuzhiyun 				   unsigned int ptr)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun 	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun static inline struct bucket *PTR_BUCKET(struct cache_set *c,
818*4882a593Smuzhiyun 					const struct bkey *k,
819*4882a593Smuzhiyun 					unsigned int ptr)
820*4882a593Smuzhiyun {
821*4882a593Smuzhiyun 	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun 
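/*
 * How much newer gen a is than gen b, treating the 8 bit gens as a circular
 * space; returns 0 if a is actually behind b.
 */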
824*4882a593Smuzhiyun static inline uint8_t gen_after(uint8_t a, uint8_t b)
825*4882a593Smuzhiyun {
826*4882a593Smuzhiyun 	uint8_t r = a - b;
827*4882a593Smuzhiyun 
828*4882a593Smuzhiyun 	return r > 128U ? 0 : r;
829*4882a593Smuzhiyun }
830*4882a593Smuzhiyun 
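/*
 * Nonzero if the bucket this pointer points into has been reused since the
 * pointer was written, i.e. the bucket's gen has advanced past the gen
 * embedded in the pointer.
 */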
831*4882a593Smuzhiyun static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
832*4882a593Smuzhiyun 				unsigned int i)
833*4882a593Smuzhiyun {
834*4882a593Smuzhiyun 	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
835*4882a593Smuzhiyun }
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
838*4882a593Smuzhiyun 				 unsigned int i)
839*4882a593Smuzhiyun {
840*4882a593Smuzhiyun 	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun /* Btree key macros */
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun /*
846*4882a593Smuzhiyun  * This is used for various on disk data structures - cache_sb, prio_set, bset,
847*4882a593Smuzhiyun  * jset: The checksum is _always_ the first 8 bytes of these structs
848*4882a593Smuzhiyun  */
849*4882a593Smuzhiyun #define csum_set(i)							\
850*4882a593Smuzhiyun 	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
851*4882a593Smuzhiyun 		  ((void *) bset_bkey_last(i)) -			\
852*4882a593Smuzhiyun 		  (((void *) (i)) + sizeof(uint64_t)))
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun /* Error handling macros */
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun #define btree_bug(b, ...)						\
857*4882a593Smuzhiyun do {									\
858*4882a593Smuzhiyun 	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
859*4882a593Smuzhiyun 		dump_stack();						\
860*4882a593Smuzhiyun } while (0)
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun #define cache_bug(c, ...)						\
863*4882a593Smuzhiyun do {									\
864*4882a593Smuzhiyun 	if (bch_cache_set_error(c, __VA_ARGS__))			\
865*4882a593Smuzhiyun 		dump_stack();						\
866*4882a593Smuzhiyun } while (0)
867*4882a593Smuzhiyun 
868*4882a593Smuzhiyun #define btree_bug_on(cond, b, ...)					\
869*4882a593Smuzhiyun do {									\
870*4882a593Smuzhiyun 	if (cond)							\
871*4882a593Smuzhiyun 		btree_bug(b, __VA_ARGS__);				\
872*4882a593Smuzhiyun } while (0)
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun #define cache_bug_on(cond, c, ...)					\
875*4882a593Smuzhiyun do {									\
876*4882a593Smuzhiyun 	if (cond)							\
877*4882a593Smuzhiyun 		cache_bug(c, __VA_ARGS__);				\
878*4882a593Smuzhiyun } while (0)
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun #define cache_set_err_on(cond, c, ...)					\
881*4882a593Smuzhiyun do {									\
882*4882a593Smuzhiyun 	if (cond)							\
883*4882a593Smuzhiyun 		bch_cache_set_error(c, __VA_ARGS__);			\
884*4882a593Smuzhiyun } while (0)
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun /* Looping macros */
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun #define for_each_bucket(b, ca)						\
889*4882a593Smuzhiyun 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
890*4882a593Smuzhiyun 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
891*4882a593Smuzhiyun 
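/*
 * Drop a reference taken with cached_dev_get(); when the count hits zero the
 * detach work is scheduled to finish tearing down the attachment.
 */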
892*4882a593Smuzhiyun static inline void cached_dev_put(struct cached_dev *dc)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun 	if (refcount_dec_and_test(&dc->count))
895*4882a593Smuzhiyun 		schedule_work(&dc->detach);
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun static inline bool cached_dev_get(struct cached_dev *dc)
899*4882a593Smuzhiyun {
900*4882a593Smuzhiyun 	if (!refcount_inc_not_zero(&dc->count))
901*4882a593Smuzhiyun 		return false;
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	/* Paired with the mb in cached_dev_attach */
904*4882a593Smuzhiyun 	smp_mb__after_atomic();
905*4882a593Smuzhiyun 	return true;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun /*
909*4882a593Smuzhiyun  * bucket_gc_gen() returns the difference between the bucket's current gen and
910*4882a593Smuzhiyun  * the oldest gen of any pointer into that bucket in the btree (last_gc).
911*4882a593Smuzhiyun  */
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun static inline uint8_t bucket_gc_gen(struct bucket *b)
914*4882a593Smuzhiyun {
915*4882a593Smuzhiyun 	return b->gen - b->last_gc;
916*4882a593Smuzhiyun }
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun #define BUCKET_GC_GEN_MAX	96U
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun #define kobj_attribute_write(n, fn)					\
921*4882a593Smuzhiyun 	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun #define kobj_attribute_rw(n, show, store)				\
924*4882a593Smuzhiyun 	static struct kobj_attribute ksysfs_##n =			\
925*4882a593Smuzhiyun 		__ATTR(n, 0600, show, store)
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun static inline void wake_up_allocators(struct cache_set *c)
928*4882a593Smuzhiyun {
929*4882a593Smuzhiyun 	struct cache *ca = c->cache;
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 	wake_up_process(ca->alloc_thread);
932*4882a593Smuzhiyun }
933*4882a593Smuzhiyun 
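/*
 * Submit a bio on behalf of a closure: take a ref on the closure for the
 * duration of the I/O, and fail the bio immediately with an I/O error if the
 * cache set has been marked CACHE_SET_IO_DISABLE.
 */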
934*4882a593Smuzhiyun static inline void closure_bio_submit(struct cache_set *c,
935*4882a593Smuzhiyun 				      struct bio *bio,
936*4882a593Smuzhiyun 				      struct closure *cl)
937*4882a593Smuzhiyun {
938*4882a593Smuzhiyun 	closure_get(cl);
939*4882a593Smuzhiyun 	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
940*4882a593Smuzhiyun 		bio->bi_status = BLK_STS_IOERR;
941*4882a593Smuzhiyun 		bio_endio(bio);
942*4882a593Smuzhiyun 		return;
943*4882a593Smuzhiyun 	}
944*4882a593Smuzhiyun 	submit_bio_noacct(bio);
945*4882a593Smuzhiyun }
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun /*
948*4882a593Smuzhiyun  * Prevent the kthread from exiting on its own, and make sure that when
949*4882a593Smuzhiyun  * kthread_stop() is called to stop a kthread, it is still alive. If a
950*4882a593Smuzhiyun  * kthread might be stopped because CACHE_SET_IO_DISABLE gets set,
951*4882a593Smuzhiyun  * wait_for_kthread_stop() is necessary before the kthread returns.
952*4882a593Smuzhiyun  */
953*4882a593Smuzhiyun static inline void wait_for_kthread_stop(void)
954*4882a593Smuzhiyun {
955*4882a593Smuzhiyun 	while (!kthread_should_stop()) {
956*4882a593Smuzhiyun 		set_current_state(TASK_INTERRUPTIBLE);
957*4882a593Smuzhiyun 		schedule();
958*4882a593Smuzhiyun 	}
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun /* Forward declarations */
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
964*4882a593Smuzhiyun void bch_count_io_errors(struct cache *ca, blk_status_t error,
965*4882a593Smuzhiyun 			 int is_read, const char *m);
966*4882a593Smuzhiyun void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
967*4882a593Smuzhiyun 			      blk_status_t error, const char *m);
968*4882a593Smuzhiyun void bch_bbio_endio(struct cache_set *c, struct bio *bio,
969*4882a593Smuzhiyun 		    blk_status_t error, const char *m);
970*4882a593Smuzhiyun void bch_bbio_free(struct bio *bio, struct cache_set *c);
971*4882a593Smuzhiyun struct bio *bch_bbio_alloc(struct cache_set *c);
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
974*4882a593Smuzhiyun void bch_submit_bbio(struct bio *bio, struct cache_set *c,
975*4882a593Smuzhiyun 		     struct bkey *k, unsigned int ptr);
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
978*4882a593Smuzhiyun void bch_rescale_priorities(struct cache_set *c, int sectors);
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
981*4882a593Smuzhiyun void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun void __bch_bucket_free(struct cache *ca, struct bucket *b);
984*4882a593Smuzhiyun void bch_bucket_free(struct cache_set *c, struct bkey *k);
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
987*4882a593Smuzhiyun int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
988*4882a593Smuzhiyun 			   struct bkey *k, bool wait);
989*4882a593Smuzhiyun int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
990*4882a593Smuzhiyun 			 struct bkey *k, bool wait);
991*4882a593Smuzhiyun bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
992*4882a593Smuzhiyun 		       unsigned int sectors, unsigned int write_point,
993*4882a593Smuzhiyun 		       unsigned int write_prio, bool wait);
994*4882a593Smuzhiyun bool bch_cached_dev_error(struct cached_dev *dc);
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun __printf(2, 3)
997*4882a593Smuzhiyun bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun int bch_prio_write(struct cache *ca, bool wait);
1000*4882a593Smuzhiyun void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun extern struct workqueue_struct *bcache_wq;
1003*4882a593Smuzhiyun extern struct workqueue_struct *bch_journal_wq;
1004*4882a593Smuzhiyun extern struct workqueue_struct *bch_flush_wq;
1005*4882a593Smuzhiyun extern struct mutex bch_register_lock;
1006*4882a593Smuzhiyun extern struct list_head bch_cache_sets;
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun extern struct kobj_type bch_cached_dev_ktype;
1009*4882a593Smuzhiyun extern struct kobj_type bch_flash_dev_ktype;
1010*4882a593Smuzhiyun extern struct kobj_type bch_cache_set_ktype;
1011*4882a593Smuzhiyun extern struct kobj_type bch_cache_set_internal_ktype;
1012*4882a593Smuzhiyun extern struct kobj_type bch_cache_ktype;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun void bch_cached_dev_release(struct kobject *kobj);
1015*4882a593Smuzhiyun void bch_flash_dev_release(struct kobject *kobj);
1016*4882a593Smuzhiyun void bch_cache_set_release(struct kobject *kobj);
1017*4882a593Smuzhiyun void bch_cache_release(struct kobject *kobj);
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun int bch_uuid_write(struct cache_set *c);
1020*4882a593Smuzhiyun void bcache_write_super(struct cache_set *c);
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun int bch_flash_dev_create(struct cache_set *c, uint64_t size);
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1025*4882a593Smuzhiyun 			  uint8_t *set_uuid);
1026*4882a593Smuzhiyun void bch_cached_dev_detach(struct cached_dev *dc);
1027*4882a593Smuzhiyun int bch_cached_dev_run(struct cached_dev *dc);
1028*4882a593Smuzhiyun void bcache_device_stop(struct bcache_device *d);
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun void bch_cache_set_unregister(struct cache_set *c);
1031*4882a593Smuzhiyun void bch_cache_set_stop(struct cache_set *c);
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
1034*4882a593Smuzhiyun void bch_btree_cache_free(struct cache_set *c);
1035*4882a593Smuzhiyun int bch_btree_cache_alloc(struct cache_set *c);
1036*4882a593Smuzhiyun void bch_moving_init_cache_set(struct cache_set *c);
1037*4882a593Smuzhiyun int bch_open_buckets_alloc(struct cache_set *c);
1038*4882a593Smuzhiyun void bch_open_buckets_free(struct cache_set *c);
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun int bch_cache_allocator_start(struct cache *ca);
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun void bch_debug_exit(void);
1043*4882a593Smuzhiyun void bch_debug_init(void);
1044*4882a593Smuzhiyun void bch_request_exit(void);
1045*4882a593Smuzhiyun int bch_request_init(void);
1046*4882a593Smuzhiyun void bch_btree_exit(void);
1047*4882a593Smuzhiyun int bch_btree_init(void);
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun #endif /* _BCACHE_H */
1050