1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * f2fs extent cache support
4 *
5 * Copyright (c) 2015 Motorola Mobility
6 * Copyright (c) 2015 Samsung Electronics
7 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
8 * Chao Yu <chao2.yu@samsung.com>
9 *
10 * block_age-based extent cache added by:
11 * Copyright (c) 2022 xiaomi Co., Ltd.
12 * http://www.xiaomi.com/
13 */
14
15 #include <linux/fs.h>
16 #include <linux/f2fs_fs.h>
17
18 #include "f2fs.h"
19 #include "node.h"
20 #include <trace/events/f2fs.h>
21
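/*
 * Initialize an extent_info for the given extent type.  Read extents carry a
 * start block address (and, with compression, a cluster length); block-age
 * extents carry the age and the allocated-blocks snapshot instead.
 */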
static void __set_extent_info(struct extent_info *ei,
23 unsigned int fofs, unsigned int len,
24 block_t blk, bool keep_clen,
25 unsigned long age, unsigned long last_blocks,
26 enum extent_type type)
27 {
28 ei->fofs = fofs;
29 ei->len = len;
30
31 if (type == EX_READ) {
32 ei->blk = blk;
33 if (keep_clen)
34 return;
35 #ifdef CONFIG_F2FS_FS_COMPRESSION
36 ei->c_len = 0;
37 #endif
38 } else if (type == EX_BLOCK_AGE) {
39 ei->age = age;
40 ei->last_blocks = last_blocks;
41 }
42 }
43
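/*
 * A regular file may use the read extent cache only if the mount enables it,
 * the inode is not marked FI_NO_EXTENT, and the file is not compressed
 * (compressed files are allowed only on a read-only image).
 */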
static bool __may_read_extent_tree(struct inode *inode)
45 {
46 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
47
48 if (!test_opt(sbi, READ_EXTENT_CACHE))
49 return false;
50 if (is_inode_flag_set(inode, FI_NO_EXTENT))
51 return false;
52 if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
53 !f2fs_sb_has_readonly(sbi))
54 return false;
55 return S_ISREG(inode->i_mode);
56 }
57
static bool __may_age_extent_tree(struct inode *inode)
59 {
60 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
61
62 if (!test_opt(sbi, AGE_EXTENT_CACHE))
63 return false;
	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
		return false;
	/* don't cache block age info for cold file */
	if (file_is_cold(inode))
68 return false;
69
70 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
71 }
72
static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
74 {
75 if (type == EX_READ)
76 return __may_read_extent_tree(inode);
77 else if (type == EX_BLOCK_AGE)
78 return __may_age_extent_tree(inode);
79 return false;
80 }
81
static bool __may_extent_tree(struct inode *inode, enum extent_type type)
83 {
84 /*
 * do not create extents for files recovered during mount
 * if the shrinker is not registered yet.
87 */
88 if (list_empty(&F2FS_I_SB(inode)->s_list))
89 return false;
90
91 return __init_may_extent_tree(inode, type);
92 }
93
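/* For read extents, remember the largest extent seen in this tree. */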
static void __try_update_largest_extent(struct extent_tree *et,
95 struct extent_node *en)
96 {
97 if (et->type != EX_READ)
98 return;
99 if (en->ei.len <= et->largest.len)
100 return;
101
102 et->largest = en->ei;
103 et->largest_updated = true;
104 }
105
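/*
 * Two extents are mergeable when they are logically contiguous and, for read
 * extents, also physically contiguous (e.g. back {fofs=0, len=4, blk=100}
 * merges with front {fofs=4, blk=104}).  Block-age extents merge when their
 * ages and allocation snapshots fall within SAME_AGE_REGION of each other.
 * With compression enabled, an extent whose length no longer matches its
 * recorded cluster length is not merged.
 */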
static bool __is_extent_mergeable(struct extent_info *back,
107 struct extent_info *front, enum extent_type type)
108 {
109 if (type == EX_READ) {
110 #ifdef CONFIG_F2FS_FS_COMPRESSION
111 if (back->c_len && back->len != back->c_len)
112 return false;
113 if (front->c_len && front->len != front->c_len)
114 return false;
115 #endif
116 return (back->fofs + back->len == front->fofs &&
117 back->blk + back->len == front->blk);
118 } else if (type == EX_BLOCK_AGE) {
119 return (back->fofs + back->len == front->fofs &&
120 abs(back->age - front->age) <= SAME_AGE_REGION &&
121 abs(back->last_blocks - front->last_blocks) <=
122 SAME_AGE_REGION);
123 }
124 return false;
125 }
126
static bool __is_back_mergeable(struct extent_info *cur,
128 struct extent_info *back, enum extent_type type)
129 {
130 return __is_extent_mergeable(back, cur, type);
131 }
132
static bool __is_front_mergeable(struct extent_info *cur,
134 struct extent_info *front, enum extent_type type)
135 {
136 return __is_extent_mergeable(cur, front, type);
137 }
138
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
140 unsigned int ofs)
141 {
142 if (cached_re) {
143 if (cached_re->ofs <= ofs &&
144 cached_re->ofs + cached_re->len > ofs) {
145 return cached_re;
146 }
147 }
148 return NULL;
149 }
150
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
152 unsigned int ofs)
153 {
154 struct rb_node *node = root->rb_root.rb_node;
155 struct rb_entry *re;
156
157 while (node) {
158 re = rb_entry(node, struct rb_entry, rb_node);
159
160 if (ofs < re->ofs)
161 node = node->rb_left;
162 else if (ofs >= re->ofs + re->len)
163 node = node->rb_right;
164 else
165 return re;
166 }
167 return NULL;
168 }
169
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
171 struct rb_entry *cached_re, unsigned int ofs)
172 {
173 struct rb_entry *re;
174
175 re = __lookup_rb_tree_fast(cached_re, ofs);
176 if (!re)
177 return __lookup_rb_tree_slow(root, ofs);
178
179 return re;
180 }
181
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
183 struct rb_root_cached *root,
184 struct rb_node **parent,
185 unsigned long long key, bool *leftmost)
186 {
187 struct rb_node **p = &root->rb_root.rb_node;
188 struct rb_entry *re;
189
190 while (*p) {
191 *parent = *p;
192 re = rb_entry(*parent, struct rb_entry, rb_node);
193
194 if (key < re->key) {
195 p = &(*p)->rb_left;
196 } else {
197 p = &(*p)->rb_right;
198 *leftmost = false;
199 }
200 }
201
202 return p;
203 }
204
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
206 struct rb_root_cached *root,
207 struct rb_node **parent,
208 unsigned int ofs, bool *leftmost)
209 {
210 struct rb_node **p = &root->rb_root.rb_node;
211 struct rb_entry *re;
212
213 while (*p) {
214 *parent = *p;
215 re = rb_entry(*parent, struct rb_entry, rb_node);
216
217 if (ofs < re->ofs) {
218 p = &(*p)->rb_left;
219 } else if (ofs >= re->ofs + re->len) {
220 p = &(*p)->rb_right;
221 *leftmost = false;
222 } else {
223 f2fs_bug_on(sbi, 1);
224 }
225 }
226
227 return p;
228 }
229
230 /*
231 * lookup rb entry in position of @ofs in rb-tree,
232 * if hit, return the entry, otherwise, return NULL
233 * @prev_ex: extent before ofs
234 * @next_ex: extent after ofs
235 * @insert_p: insert point for new extent at ofs
 *	in order to simplify the insertion afterwards.
 * tree must stay unchanged between lookup and insertion.
238 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
240 struct rb_entry *cached_re,
241 unsigned int ofs,
242 struct rb_entry **prev_entry,
243 struct rb_entry **next_entry,
244 struct rb_node ***insert_p,
245 struct rb_node **insert_parent,
246 bool force, bool *leftmost)
247 {
248 struct rb_node **pnode = &root->rb_root.rb_node;
249 struct rb_node *parent = NULL, *tmp_node;
250 struct rb_entry *re = cached_re;
251
252 *insert_p = NULL;
253 *insert_parent = NULL;
254 *prev_entry = NULL;
255 *next_entry = NULL;
256
257 if (RB_EMPTY_ROOT(&root->rb_root))
258 return NULL;
259
260 if (re) {
261 if (re->ofs <= ofs && re->ofs + re->len > ofs)
262 goto lookup_neighbors;
263 }
264
265 if (leftmost)
266 *leftmost = true;
267
268 while (*pnode) {
269 parent = *pnode;
270 re = rb_entry(*pnode, struct rb_entry, rb_node);
271
272 if (ofs < re->ofs) {
273 pnode = &(*pnode)->rb_left;
274 } else if (ofs >= re->ofs + re->len) {
275 pnode = &(*pnode)->rb_right;
276 if (leftmost)
277 *leftmost = false;
278 } else {
279 goto lookup_neighbors;
280 }
281 }
282
283 *insert_p = pnode;
284 *insert_parent = parent;
285
286 re = rb_entry(parent, struct rb_entry, rb_node);
287 tmp_node = parent;
288 if (parent && ofs > re->ofs)
289 tmp_node = rb_next(parent);
290 *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
291
292 tmp_node = parent;
293 if (parent && ofs < re->ofs)
294 tmp_node = rb_prev(parent);
295 *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
296 return NULL;
297
298 lookup_neighbors:
299 if (ofs == re->ofs || force) {
300 /* lookup prev node for merging backward later */
301 tmp_node = rb_prev(&re->rb_node);
302 *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
303 }
304 if (ofs == re->ofs + re->len - 1 || force) {
305 /* lookup next node for merging frontward later */
306 tmp_node = rb_next(&re->rb_node);
307 *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
308 }
309 return re;
310 }
311
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
313 struct rb_root_cached *root, bool check_key)
314 {
315 #ifdef CONFIG_F2FS_CHECK_FS
316 struct rb_node *cur = rb_first_cached(root), *next;
317 struct rb_entry *cur_re, *next_re;
318
319 if (!cur)
320 return true;
321
322 while (cur) {
323 next = rb_next(cur);
324 if (!next)
325 return true;
326
327 cur_re = rb_entry(cur, struct rb_entry, rb_node);
328 next_re = rb_entry(next, struct rb_entry, rb_node);
329
330 if (check_key) {
331 if (cur_re->key > next_re->key) {
332 f2fs_info(sbi, "inconsistent rbtree, "
333 "cur(%llu) next(%llu)",
334 cur_re->key, next_re->key);
335 return false;
336 }
337 goto next;
338 }
339
340 if (cur_re->ofs + cur_re->len > next_re->ofs) {
341 f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
342 cur_re->ofs, cur_re->len,
343 next_re->ofs, next_re->len);
344 return false;
345 }
346 next:
347 cur = next;
348 }
349 #endif
350 return true;
351 }
352
353 static struct kmem_cache *extent_tree_slab;
354 static struct kmem_cache *extent_node_slab;
355
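/*
 * Allocate an extent node for @ei and link it into the rb-tree at the given
 * insert position.  Counters are updated; the node is not yet on the LRU
 * extent_list.  Returns NULL if allocation fails.
 */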
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
357 struct extent_tree *et, struct extent_info *ei,
358 struct rb_node *parent, struct rb_node **p,
359 bool leftmost)
360 {
361 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
362 struct extent_node *en;
363
364 en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
365 if (!en)
366 return NULL;
367
368 en->ei = *ei;
369 INIT_LIST_HEAD(&en->list);
370 en->et = et;
371
372 rb_link_node(&en->rb_node, parent, p);
373 rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
374 atomic_inc(&et->node_cnt);
375 atomic_inc(&eti->total_ext_node);
376 return en;
377 }
378
static void __detach_extent_node(struct f2fs_sb_info *sbi,
380 struct extent_tree *et, struct extent_node *en)
381 {
382 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
383
384 rb_erase_cached(&en->rb_node, &et->root);
385 atomic_dec(&et->node_cnt);
386 atomic_dec(&eti->total_ext_node);
387
388 if (et->cached_en == en)
389 et->cached_en = NULL;
390 kmem_cache_free(extent_node_slab, en);
391 }
392
393 /*
394 * Flow to release an extent_node:
395 * 1. list_del_init
396 * 2. __detach_extent_node
397 * 3. kmem_cache_free.
398 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
400 struct extent_tree *et, struct extent_node *en)
401 {
402 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
403
404 spin_lock(&eti->extent_lock);
405 f2fs_bug_on(sbi, list_empty(&en->list));
406 list_del_init(&en->list);
407 spin_unlock(&eti->extent_lock);
408
409 __detach_extent_node(sbi, et, en);
410 }
411
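/*
 * Find the inode's extent tree of @type in the per-sb radix tree, creating it
 * on first use; a tree left behind on the zombie list is revived instead.
 */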
static struct extent_tree *__grab_extent_tree(struct inode *inode,
413 enum extent_type type)
414 {
415 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
416 struct extent_tree_info *eti = &sbi->extent_tree[type];
417 struct extent_tree *et;
418 nid_t ino = inode->i_ino;
419
420 mutex_lock(&eti->extent_tree_lock);
421 et = radix_tree_lookup(&eti->extent_tree_root, ino);
422 if (!et) {
423 et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
424 f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
425 memset(et, 0, sizeof(struct extent_tree));
426 et->ino = ino;
427 et->type = type;
428 et->root = RB_ROOT_CACHED;
429 et->cached_en = NULL;
430 rwlock_init(&et->lock);
431 INIT_LIST_HEAD(&et->list);
432 atomic_set(&et->node_cnt, 0);
433 atomic_inc(&eti->total_ext_tree);
434 } else {
435 atomic_dec(&eti->total_zombie_tree);
436 list_del_init(&et->list);
437 }
438 mutex_unlock(&eti->extent_tree_lock);
439
	/* the tree stays alive until evict_inode */
441 F2FS_I(inode)->extent_tree[type] = et;
442
443 return et;
444 }
445
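/*
 * Release every extent node in the tree; returns how many nodes were freed.
 * Caller must hold et->lock for writing.
 */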
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
447 struct extent_tree *et)
448 {
449 struct rb_node *node, *next;
450 struct extent_node *en;
451 unsigned int count = atomic_read(&et->node_cnt);
452
453 node = rb_first_cached(&et->root);
454 while (node) {
455 next = rb_next(node);
456 en = rb_entry(node, struct extent_node, rb_node);
457 __release_extent_node(sbi, et, en);
458 node = next;
459 }
460
461 return count - atomic_read(&et->node_cnt);
462 }
463
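/* Invalidate the cached largest extent if it overlaps [fofs, fofs + len). */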
static void __drop_largest_extent(struct extent_tree *et,
465 pgoff_t fofs, unsigned int len)
466 {
467 if (fofs < et->largest.fofs + et->largest.len &&
468 fofs + len > et->largest.fofs) {
469 et->largest.len = 0;
470 et->largest_updated = true;
471 }
472 }
473
void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
475 {
476 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
477 struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
478 struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
479 struct extent_tree *et;
480 struct extent_node *en;
481 struct extent_info ei;
482
483 if (!__may_extent_tree(inode, EX_READ)) {
484 /* drop largest read extent */
485 if (i_ext && i_ext->len) {
486 f2fs_wait_on_page_writeback(ipage, NODE, true, true);
487 i_ext->len = 0;
488 set_page_dirty(ipage);
489 }
490 goto out;
491 }
492
493 et = __grab_extent_tree(inode, EX_READ);
494
495 if (!i_ext || !i_ext->len)
496 goto out;
497
498 get_read_extent_info(&ei, i_ext);
499
500 write_lock(&et->lock);
501 if (atomic_read(&et->node_cnt))
502 goto unlock_out;
503
504 en = __attach_extent_node(sbi, et, &ei, NULL,
505 &et->root.rb_root.rb_node, true);
506 if (en) {
507 et->largest = en->ei;
508 et->cached_en = en;
509
510 spin_lock(&eti->extent_lock);
511 list_add_tail(&en->list, &eti->extent_list);
512 spin_unlock(&eti->extent_lock);
513 }
514 unlock_out:
515 write_unlock(&et->lock);
516 out:
517 if (!F2FS_I(inode)->extent_tree[EX_READ])
518 set_inode_flag(inode, FI_NO_EXTENT);
519 }
520
void f2fs_init_age_extent_tree(struct inode *inode)
522 {
523 if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
524 return;
525 __grab_extent_tree(inode, EX_BLOCK_AGE);
526 }
527
void f2fs_init_extent_tree(struct inode *inode)
529 {
530 /* initialize read cache */
531 if (__init_may_extent_tree(inode, EX_READ))
532 __grab_extent_tree(inode, EX_READ);
533
534 /* initialize block age cache */
535 if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
536 __grab_extent_tree(inode, EX_BLOCK_AGE);
537 }
538
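/*
 * Look up @pgofs in the inode's extent tree of @type.  The cached largest
 * read extent is tried first, then the cached node, then the rb-tree; a hit
 * is copied into @ei and the node is moved to the tail of the LRU list.
 */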
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
540 struct extent_info *ei, enum extent_type type)
541 {
542 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
543 struct extent_tree_info *eti = &sbi->extent_tree[type];
544 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
545 struct extent_node *en;
546 bool ret = false;
547
548 if (!et)
549 return false;
550
551 trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
552
553 read_lock(&et->lock);
554
555 if (type == EX_READ &&
556 et->largest.fofs <= pgofs &&
557 et->largest.fofs + et->largest.len > pgofs) {
558 *ei = et->largest;
559 ret = true;
560 stat_inc_largest_node_hit(sbi);
561 goto out;
562 }
563
564 en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
565 (struct rb_entry *)et->cached_en, pgofs);
566 if (!en)
567 goto out;
568
569 if (en == et->cached_en)
570 stat_inc_cached_node_hit(sbi, type);
571 else
572 stat_inc_rbtree_node_hit(sbi, type);
573
574 *ei = en->ei;
575 spin_lock(&eti->extent_lock);
576 if (!list_empty(&en->list)) {
577 list_move_tail(&en->list, &eti->extent_list);
578 et->cached_en = en;
579 }
580 spin_unlock(&eti->extent_lock);
581 ret = true;
582 out:
583 stat_inc_total_hit(sbi, type);
584 read_unlock(&et->lock);
585
586 if (type == EX_READ)
587 trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
588 else if (type == EX_BLOCK_AGE)
589 trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
590 return ret;
591 }
592
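/*
 * Try to merge @ei with its logically previous and/or next node.  If both
 * merges apply, the next node absorbs the combined range and the previous
 * node is released.  Returns the merged node, or NULL if nothing merged.
 */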
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
594 struct extent_tree *et, struct extent_info *ei,
595 struct extent_node *prev_ex,
596 struct extent_node *next_ex)
597 {
598 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
599 struct extent_node *en = NULL;
600
601 if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
602 prev_ex->ei.len += ei->len;
603 ei = &prev_ex->ei;
604 en = prev_ex;
605 }
606
607 if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
608 next_ex->ei.fofs = ei->fofs;
609 next_ex->ei.len += ei->len;
610 if (et->type == EX_READ)
611 next_ex->ei.blk = ei->blk;
612 if (en)
613 __release_extent_node(sbi, et, prev_ex);
614
615 en = next_ex;
616 }
617
618 if (!en)
619 return NULL;
620
621 __try_update_largest_extent(et, en);
622
623 spin_lock(&eti->extent_lock);
624 if (!list_empty(&en->list)) {
625 list_move_tail(&en->list, &eti->extent_list);
626 et->cached_en = en;
627 }
628 spin_unlock(&eti->extent_lock);
629 return en;
630 }
631
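/*
 * Insert @ei as a new node, reusing a previously computed insert position
 * when one is supplied.  The new node becomes the cached node and is added
 * to the tail of the global LRU extent_list.
 */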
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
633 struct extent_tree *et, struct extent_info *ei,
634 struct rb_node **insert_p,
635 struct rb_node *insert_parent,
636 bool leftmost)
637 {
638 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
639 struct rb_node **p;
640 struct rb_node *parent = NULL;
641 struct extent_node *en = NULL;
642
643 if (insert_p && insert_parent) {
644 parent = insert_parent;
645 p = insert_p;
646 goto do_insert;
647 }
648
649 leftmost = true;
650
651 p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
652 ei->fofs, &leftmost);
653 do_insert:
654 en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
655 if (!en)
656 return NULL;
657
658 __try_update_largest_extent(et, en);
659
660 /* update in global extent list */
661 spin_lock(&eti->extent_lock);
662 list_add_tail(&en->list, &eti->extent_list);
663 et->cached_en = en;
664 spin_unlock(&eti->extent_lock);
665 return en;
666 }
667
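/*
 * Update the extent tree for the range described by @tei: existing nodes in
 * [fofs, fofs + len) are shrunk, split or removed, and the new extent is
 * then merged with its neighbours or inserted as a fresh node.
 */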
static void __update_extent_tree_range(struct inode *inode,
669 struct extent_info *tei, enum extent_type type)
670 {
671 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
672 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
673 struct extent_node *en = NULL, *en1 = NULL;
674 struct extent_node *prev_en = NULL, *next_en = NULL;
675 struct extent_info ei, dei, prev;
676 struct rb_node **insert_p = NULL, *insert_parent = NULL;
677 unsigned int fofs = tei->fofs, len = tei->len;
678 unsigned int end = fofs + len;
679 bool updated = false;
680 bool leftmost = false;
681
682 if (!et)
683 return;
684
685 if (type == EX_READ)
686 trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
687 tei->blk, 0);
688 else if (type == EX_BLOCK_AGE)
689 trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
690 tei->age, tei->last_blocks);
691
692 write_lock(&et->lock);
693
694 if (type == EX_READ) {
695 if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
696 write_unlock(&et->lock);
697 return;
698 }
699
700 prev = et->largest;
701 dei.len = 0;
702
703 /*
704 * drop largest extent before lookup, in case it's already
705 * been shrunk from extent tree
706 */
707 __drop_largest_extent(et, fofs, len);
708 }
709
710 /* 1. lookup first extent node in range [fofs, fofs + len - 1] */
711 en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
712 (struct rb_entry *)et->cached_en, fofs,
713 (struct rb_entry **)&prev_en,
714 (struct rb_entry **)&next_en,
715 &insert_p, &insert_parent, false,
716 &leftmost);
717 if (!en)
718 en = next_en;
719
	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
721 while (en && en->ei.fofs < end) {
722 unsigned int org_end;
723 int parts = 0; /* # of parts current extent split into */
724
725 next_en = en1 = NULL;
726
727 dei = en->ei;
728 org_end = dei.fofs + dei.len;
729 f2fs_bug_on(sbi, fofs >= org_end);
730
731 if (fofs > dei.fofs && (type != EX_READ ||
732 fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
733 en->ei.len = fofs - en->ei.fofs;
734 prev_en = en;
735 parts = 1;
736 }
737
738 if (end < org_end && (type != EX_READ ||
739 org_end - end >= F2FS_MIN_EXTENT_LEN)) {
740 if (parts) {
741 __set_extent_info(&ei,
742 end, org_end - end,
743 end - dei.fofs + dei.blk, false,
744 dei.age, dei.last_blocks,
745 type);
746 en1 = __insert_extent_tree(sbi, et, &ei,
747 NULL, NULL, true);
748 next_en = en1;
749 } else {
750 __set_extent_info(&en->ei,
751 end, en->ei.len - (end - dei.fofs),
752 en->ei.blk + (end - dei.fofs), true,
753 dei.age, dei.last_blocks,
754 type);
755 next_en = en;
756 }
757 parts++;
758 }
759
760 if (!next_en) {
761 struct rb_node *node = rb_next(&en->rb_node);
762
763 next_en = rb_entry_safe(node, struct extent_node,
764 rb_node);
765 }
766
767 if (parts)
768 __try_update_largest_extent(et, en);
769 else
770 __release_extent_node(sbi, et, en);
771
772 /*
773 * if original extent is split into zero or two parts, extent
774 * tree has been altered by deletion or insertion, therefore
	 * invalidate the cached pointers into the tree.
776 */
777 if (parts != 1) {
778 insert_p = NULL;
779 insert_parent = NULL;
780 }
781 en = next_en;
782 }
783
784 if (type == EX_BLOCK_AGE)
785 goto update_age_extent_cache;
786
787 /* 3. update extent in read extent cache */
788 BUG_ON(type != EX_READ);
789
790 if (tei->blk) {
791 __set_extent_info(&ei, fofs, len, tei->blk, false,
792 0, 0, EX_READ);
793 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
794 __insert_extent_tree(sbi, et, &ei,
795 insert_p, insert_parent, leftmost);
796
797 /* give up extent_cache, if split and small updates happen */
798 if (dei.len >= 1 &&
799 prev.len < F2FS_MIN_EXTENT_LEN &&
800 et->largest.len < F2FS_MIN_EXTENT_LEN) {
801 et->largest.len = 0;
802 et->largest_updated = true;
803 set_inode_flag(inode, FI_NO_EXTENT);
804 }
805 }
806
807 if (is_inode_flag_set(inode, FI_NO_EXTENT))
808 __free_extent_tree(sbi, et);
809
810 if (et->largest_updated) {
811 et->largest_updated = false;
812 updated = true;
813 }
814 goto out_read_extent_cache;
815 update_age_extent_cache:
816 if (!tei->last_blocks)
817 goto out_read_extent_cache;
818
819 __set_extent_info(&ei, fofs, len, 0, false,
820 tei->age, tei->last_blocks, EX_BLOCK_AGE);
821 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
822 __insert_extent_tree(sbi, et, &ei,
823 insert_p, insert_parent, leftmost);
824 out_read_extent_cache:
825 write_unlock(&et->lock);
826
827 if (updated)
828 f2fs_mark_inode_dirty_sync(inode, true);
829 }
830
831 #ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
833 pgoff_t fofs, block_t blkaddr, unsigned int llen,
834 unsigned int c_len)
835 {
836 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
837 struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
838 struct extent_node *en = NULL;
839 struct extent_node *prev_en = NULL, *next_en = NULL;
840 struct extent_info ei;
841 struct rb_node **insert_p = NULL, *insert_parent = NULL;
842 bool leftmost = false;
843
844 trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
845 blkaddr, c_len);
846
847 /* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
848 if (is_inode_flag_set(inode, FI_NO_EXTENT))
849 return;
850
851 write_lock(&et->lock);
852
853 en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
854 (struct rb_entry *)et->cached_en, fofs,
855 (struct rb_entry **)&prev_en,
856 (struct rb_entry **)&next_en,
857 &insert_p, &insert_parent, false,
858 &leftmost);
859 if (en)
860 goto unlock_out;
861
862 __set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
863 ei.c_len = c_len;
864
865 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
866 __insert_extent_tree(sbi, et, &ei,
867 insert_p, insert_parent, leftmost);
868 unlock_out:
869 write_unlock(&et->lock);
870 }
871 #endif
872
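/*
 * Blend a new age sample into the old one as a weighted average:
 *   res = new * (100 - weight) / 100 + old * weight / 100
 * computed with div_u64_rem() so the 64-bit values never overflow.
 * Illustrative values only: with weight = 30, new = 1000 and old = 2000,
 * the result is 700 + 600 = 1300.
 */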
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
874 unsigned long long new,
875 unsigned long long old)
876 {
877 unsigned int rem_old, rem_new;
878 unsigned long long res;
879 unsigned int weight = sbi->last_age_weight;
880
881 res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
882 + div_u64_rem(old, 100, &rem_old) * weight;
883
884 if (rem_new)
885 res += rem_new * (100 - weight) / 100;
886 if (rem_old)
887 res += rem_old * weight / 100;
888
889 return res;
890 }
891
892 /* This returns a new age and allocated blocks in ei */
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
894 block_t blkaddr)
895 {
896 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
897 loff_t f_size = i_size_read(inode);
898 unsigned long long cur_blocks =
899 atomic64_read(&sbi->allocated_data_blocks);
900 struct extent_info tei = *ei; /* only fofs and len are valid */
901
	/*
	 * When I/O is not aligned to PAGE_SIZE, the last file block is
	 * rewritten even during sequential writes, so don't record an age
	 * for a newly allocated last block here.
	 */
907 if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
908 blkaddr == NEW_ADDR)
909 return -EINVAL;
910
911 if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
912 unsigned long long cur_age;
913
914 if (cur_blocks >= tei.last_blocks)
915 cur_age = cur_blocks - tei.last_blocks;
916 else
917 /* allocated_data_blocks overflow */
918 cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
919
920 if (tei.age)
921 ei->age = __calculate_block_age(sbi, cur_age, tei.age);
922 else
923 ei->age = cur_age;
924 ei->last_blocks = cur_blocks;
925 WARN_ON(ei->age > cur_blocks);
926 return 0;
927 }
928
929 f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
930
931 /* the data block was allocated for the first time */
932 if (blkaddr == NEW_ADDR)
933 goto out;
934
935 if (__is_valid_data_blkaddr(blkaddr) &&
936 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
937 f2fs_bug_on(sbi, 1);
938 return -EINVAL;
939 }
940 out:
941 /*
942 * init block age with zero, this can happen when the block age extent
943 * was reclaimed due to memory constraint or system reboot
944 */
945 ei->age = 0;
946 ei->last_blocks = cur_blocks;
947 return 0;
948 }
949
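/*
 * Record a one-block extent for the block just mapped at dn->ofs_in_node:
 * either its block address (read cache) or its freshly computed age.
 */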
static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
951 {
952 struct extent_info ei = {};
953
954 if (!__may_extent_tree(dn->inode, type))
955 return;
956
957 ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
958 dn->ofs_in_node;
959 ei.len = 1;
960
961 if (type == EX_READ) {
962 if (dn->data_blkaddr == NEW_ADDR)
963 ei.blk = NULL_ADDR;
964 else
965 ei.blk = dn->data_blkaddr;
966 } else if (type == EX_BLOCK_AGE) {
967 if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
968 return;
969 }
970 __update_extent_tree_range(dn->inode, &ei, type);
971 }
972
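/*
 * Reclaim up to @nr_shrink entries: first drop whole zombie trees (trees of
 * evicted inodes), then release the coldest nodes from the head of the LRU
 * extent_list.
 */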
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
974 enum extent_type type)
975 {
976 struct extent_tree_info *eti = &sbi->extent_tree[type];
977 struct extent_tree *et, *next;
978 struct extent_node *en;
979 unsigned int node_cnt = 0, tree_cnt = 0;
980 int remained;
981
982 if (!atomic_read(&eti->total_zombie_tree))
983 goto free_node;
984
985 if (!mutex_trylock(&eti->extent_tree_lock))
986 goto out;
987
988 /* 1. remove unreferenced extent tree */
989 list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
990 if (atomic_read(&et->node_cnt)) {
991 write_lock(&et->lock);
992 node_cnt += __free_extent_tree(sbi, et);
993 write_unlock(&et->lock);
994 }
995 f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
996 list_del_init(&et->list);
997 radix_tree_delete(&eti->extent_tree_root, et->ino);
998 kmem_cache_free(extent_tree_slab, et);
999 atomic_dec(&eti->total_ext_tree);
1000 atomic_dec(&eti->total_zombie_tree);
1001 tree_cnt++;
1002
1003 if (node_cnt + tree_cnt >= nr_shrink)
1004 goto unlock_out;
1005 cond_resched();
1006 }
1007 mutex_unlock(&eti->extent_tree_lock);
1008
1009 free_node:
1010 /* 2. remove LRU extent entries */
1011 if (!mutex_trylock(&eti->extent_tree_lock))
1012 goto out;
1013
1014 remained = nr_shrink - (node_cnt + tree_cnt);
1015
1016 spin_lock(&eti->extent_lock);
1017 for (; remained > 0; remained--) {
1018 if (list_empty(&eti->extent_list))
1019 break;
1020 en = list_first_entry(&eti->extent_list,
1021 struct extent_node, list);
1022 et = en->et;
1023 if (!write_trylock(&et->lock)) {
1024 /* refresh this extent node's position in extent list */
1025 list_move_tail(&en->list, &eti->extent_list);
1026 continue;
1027 }
1028
1029 list_del_init(&en->list);
1030 spin_unlock(&eti->extent_lock);
1031
1032 __detach_extent_node(sbi, et, en);
1033
1034 write_unlock(&et->lock);
1035 node_cnt++;
1036 spin_lock(&eti->extent_lock);
1037 }
1038 spin_unlock(&eti->extent_lock);
1039
1040 unlock_out:
1041 mutex_unlock(&eti->extent_tree_lock);
1042 out:
1043 trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
1044
1045 return node_cnt + tree_cnt;
1046 }
1047
1048 /* read extent cache operations */
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
1050 struct extent_info *ei)
1051 {
1052 if (!__may_extent_tree(inode, EX_READ))
1053 return false;
1054
1055 return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
1056 }
1057
void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
1059 {
1060 return __update_extent_cache(dn, EX_READ);
1061 }
1062
void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
1064 pgoff_t fofs, block_t blkaddr, unsigned int len)
1065 {
1066 struct extent_info ei = {
1067 .fofs = fofs,
1068 .len = len,
1069 .blk = blkaddr,
1070 };
1071
1072 if (!__may_extent_tree(dn->inode, EX_READ))
1073 return;
1074
1075 __update_extent_tree_range(dn->inode, &ei, EX_READ);
1076 }
1077
unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
1079 {
1080 if (!test_opt(sbi, READ_EXTENT_CACHE))
1081 return 0;
1082
1083 return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
1084 }
1085
1086 /* block age extent cache operations */
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
1088 struct extent_info *ei)
1089 {
1090 if (!__may_extent_tree(inode, EX_BLOCK_AGE))
1091 return false;
1092
1093 return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
1094 }
1095
void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
1097 {
1098 return __update_extent_cache(dn, EX_BLOCK_AGE);
1099 }
1100
void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
1102 pgoff_t fofs, unsigned int len)
1103 {
1104 struct extent_info ei = {
1105 .fofs = fofs,
1106 .len = len,
1107 };
1108
1109 if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
1110 return;
1111
1112 __update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
1113 }
1114
unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
1116 {
1117 if (!test_opt(sbi, AGE_EXTENT_CACHE))
1118 return 0;
1119
1120 return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
1121 }
1122
static unsigned int __destroy_extent_node(struct inode *inode,
1124 enum extent_type type)
1125 {
1126 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1127 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
1128 unsigned int node_cnt = 0;
1129
1130 if (!et || !atomic_read(&et->node_cnt))
1131 return 0;
1132
1133 write_lock(&et->lock);
1134 node_cnt = __free_extent_tree(sbi, et);
1135 write_unlock(&et->lock);
1136
1137 return node_cnt;
1138 }
1139
void f2fs_destroy_extent_node(struct inode *inode)
1141 {
1142 __destroy_extent_node(inode, EX_READ);
1143 __destroy_extent_node(inode, EX_BLOCK_AGE);
1144 }
1145
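/*
 * Drop every cached extent of @type for the inode; for read extents this
 * also sets FI_NO_EXTENT and clears the largest extent so it is not written
 * back to the on-disk inode.
 */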
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
1147 {
1148 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1149 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
1150 bool updated = false;
1151
1152 if (!__may_extent_tree(inode, type))
1153 return;
1154
1155 write_lock(&et->lock);
1157 __free_extent_tree(sbi, et);
1158 if (type == EX_READ) {
1159 set_inode_flag(inode, FI_NO_EXTENT);
1160 if (et->largest.len) {
1161 et->largest.len = 0;
1162 updated = true;
1163 }
1164 }
1165 write_unlock(&et->lock);
1166 if (updated)
1167 f2fs_mark_inode_dirty_sync(inode, true);
1168 }
1169
void f2fs_drop_extent_tree(struct inode *inode)
1171 {
1172 __drop_extent_tree(inode, EX_READ);
1173 __drop_extent_tree(inode, EX_BLOCK_AGE);
1174 }
1175
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
1177 {
1178 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1179 struct extent_tree_info *eti = &sbi->extent_tree[type];
1180 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
1181 unsigned int node_cnt = 0;
1182
1183 if (!et)
1184 return;
1185
1186 if (inode->i_nlink && !is_bad_inode(inode) &&
1187 atomic_read(&et->node_cnt)) {
1188 mutex_lock(&eti->extent_tree_lock);
1189 list_add_tail(&et->list, &eti->zombie_list);
1190 atomic_inc(&eti->total_zombie_tree);
1191 mutex_unlock(&eti->extent_tree_lock);
1192 return;
1193 }
1194
	/* free all extent info belonging to this extent tree */
1196 node_cnt = __destroy_extent_node(inode, type);
1197
1198 /* delete extent tree entry in radix tree */
1199 mutex_lock(&eti->extent_tree_lock);
1200 f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
1201 radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
1202 kmem_cache_free(extent_tree_slab, et);
1203 atomic_dec(&eti->total_ext_tree);
1204 mutex_unlock(&eti->extent_tree_lock);
1205
1206 F2FS_I(inode)->extent_tree[type] = NULL;
1207
1208 trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
1209 }
1210
void f2fs_destroy_extent_tree(struct inode *inode)
1212 {
1213 __destroy_extent_tree(inode, EX_READ);
1214 __destroy_extent_tree(inode, EX_BLOCK_AGE);
1215 }
1216
static void __init_extent_tree_info(struct extent_tree_info *eti)
1218 {
1219 INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
1220 mutex_init(&eti->extent_tree_lock);
1221 INIT_LIST_HEAD(&eti->extent_list);
1222 spin_lock_init(&eti->extent_lock);
1223 atomic_set(&eti->total_ext_tree, 0);
1224 INIT_LIST_HEAD(&eti->zombie_list);
1225 atomic_set(&eti->total_zombie_tree, 0);
1226 atomic_set(&eti->total_ext_node, 0);
1227 }
1228
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
1230 {
1231 __init_extent_tree_info(&sbi->extent_tree[EX_READ]);
1232 __init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
1233
1234 /* initialize for block age extents */
1235 atomic64_set(&sbi->allocated_data_blocks, 0);
1236 sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
1237 sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
1238 sbi->last_age_weight = LAST_AGE_WEIGHT;
1239 }
1240
int __init f2fs_create_extent_cache(void)
1242 {
1243 extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
1244 sizeof(struct extent_tree));
1245 if (!extent_tree_slab)
1246 return -ENOMEM;
1247 extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
1248 sizeof(struct extent_node));
1249 if (!extent_node_slab) {
1250 kmem_cache_destroy(extent_tree_slab);
1251 return -ENOMEM;
1252 }
1253 return 0;
1254 }
1255
void f2fs_destroy_extent_cache(void)
1257 {
1258 kmem_cache_destroy(extent_node_slab);
1259 kmem_cache_destroy(extent_tree_slab);
1260 }
1261