Lines Matching refs:bdi (mm/backing-dev.c)

48 struct backing_dev_info *bdi = m->private; in bdi_debug_stats_show() local
49 struct bdi_writeback *wb = &bdi->wb; in bdi_debug_stats_show()
100 !list_empty(&bdi->bdi_list), bdi->wb.state); in bdi_debug_stats_show()
107 static void bdi_debug_register(struct backing_dev_info *bdi, const char *name) in bdi_debug_register() argument
109 bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root); in bdi_debug_register()
111 debugfs_create_file("stats", 0444, bdi->debug_dir, bdi, in bdi_debug_register()
115 static void bdi_debug_unregister(struct backing_dev_info *bdi) in bdi_debug_unregister() argument
117 debugfs_remove_recursive(bdi->debug_dir); in bdi_debug_unregister()
123 static inline void bdi_debug_register(struct backing_dev_info *bdi, in bdi_debug_register() argument
127 static inline void bdi_debug_unregister(struct backing_dev_info *bdi) in bdi_debug_unregister() argument
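
The debugfs hooks above follow the usual create-dir, create-file, remove-recursive pattern, with the bdi itself passed as the file's private data. A minimal sketch of the same pattern for a hypothetical "mydev" object, assuming a kernel build context; every mydev name below is illustrative, not part of the bdi API:

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/atomic.h>

struct mydev {
	struct dentry *debug_dir;
	atomic_t issued_ios;
};

static struct dentry *mydev_debug_root;		/* created once at module init */

static int mydev_stats_show(struct seq_file *m, void *v)
{
	struct mydev *dev = m->private;		/* handed in via debugfs_create_file() */

	seq_printf(m, "issued_ios: %d\n", atomic_read(&dev->issued_ios));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(mydev_stats);		/* generates mydev_stats_fops */

static void mydev_debug_register(struct mydev *dev, const char *name)
{
	dev->debug_dir = debugfs_create_dir(name, mydev_debug_root);
	debugfs_create_file("stats", 0444, dev->debug_dir, dev,
			    &mydev_stats_fops);
}

static void mydev_debug_unregister(struct mydev *dev)
{
	debugfs_remove_recursive(dev->debug_dir);
}
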
136 struct backing_dev_info *bdi = dev_get_drvdata(dev); in read_ahead_kb_store() local
144 bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10); in read_ahead_kb_store()
155 struct backing_dev_info *bdi = dev_get_drvdata(dev); \
161 BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
166 struct backing_dev_info *bdi = dev_get_drvdata(dev); in min_ratio_store() local
174 ret = bdi_set_min_ratio(bdi, ratio); in min_ratio_store()
180 BDI_SHOW(min_ratio, bdi->min_ratio)
185 struct backing_dev_info *bdi = dev_get_drvdata(dev); in max_ratio_store() local
193 ret = bdi_set_max_ratio(bdi, ratio); in max_ratio_store()
199 BDI_SHOW(max_ratio, bdi->max_ratio)
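
The sysfs side stores readahead internally in pages: read_ahead_kb_store() shifts kilobytes right by (PAGE_SHIFT - 10), and the K() macro behind BDI_SHOW() shifts back for display, while min_ratio/max_ratio are plain percentages validated by bdi_set_min_ratio()/bdi_set_max_ratio(). A small sketch of the conversion, valid because PAGE_SHIFT is always at least 10:

#include <linux/mm.h>	/* PAGE_SHIFT */

/* kB -> pages, as read_ahead_kb_store() does: with 4 KiB pages
 * (PAGE_SHIFT == 12), 128 KiB of readahead becomes 128 >> 2 = 32 pages. */
static inline unsigned long ra_kb_to_pages(unsigned long kb)
{
	return kb >> (PAGE_SHIFT - 10);
}

/* pages -> kB, mirroring the K() macro used for the show side. */
static inline unsigned long ra_pages_to_kb(unsigned long pages)
{
	return pages << (PAGE_SHIFT - 10);
}

From userspace the same knobs appear under /sys/class/bdi/<name>/.
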
233 static int bdi_init(struct backing_dev_info *bdi);
280 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, in wb_init() argument
287 if (wb != &bdi->wb) in wb_init()
288 bdi_get(bdi); in wb_init()
289 wb->bdi = bdi; in wb_init()
325 if (wb != &bdi->wb) in wb_init()
326 bdi_put(bdi); in wb_init()
366 if (wb != &wb->bdi->wb) in wb_exit()
367 bdi_put(wb->bdi); in wb_exit()
387 mutex_lock(&wb->bdi->cgwb_release_mutex); in cgwb_release_workfn()
392 mutex_unlock(&wb->bdi->cgwb_release_mutex); in cgwb_release_workfn()
414 WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id)); in cgwb_kill()
427 static int cgwb_create(struct backing_dev_info *bdi, in cgwb_create() argument
446 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); in cgwb_create()
462 ret = wb_init(wb, bdi, gfp); in cgwb_create()
487 if (test_bit(WB_registered, &bdi->wb.state) && in cgwb_create()
490 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); in cgwb_create()
492 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); in cgwb_create()
544 struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, in wb_get_lookup() argument
550 return &bdi->wb; in wb_get_lookup()
553 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); in wb_get_lookup()
577 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, in wb_get_create() argument
586 return &bdi->wb; in wb_get_create()
589 wb = wb_get_lookup(bdi, memcg_css); in wb_get_create()
590 } while (!wb && !cgwb_create(bdi, memcg_css, gfp)); in wb_get_create()
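
cgwb_create() instantiates a per-memcg writeback and inserts it into the bdi's cgwb_tree only while WB_registered is still set; wb_get_create() loops between lookup and creation until it finds a live wb or creation fails. A hedged sketch of a caller, assuming CONFIG_CGROUP_WRITEBACK; the in-tree user to compare against is __inode_attach_wb() in fs/fs-writeback.c:

#include <linux/backing-dev.h>
#include <linux/cgroup.h>

/*
 * Sketch: pick the writeback domain for a memcg on @bdi. A non-root
 * result comes back referenced (wb_get_lookup() uses wb_tryget()),
 * and the caller drops that reference with wb_put() when done.
 */
static struct bdi_writeback *pick_wb(struct backing_dev_info *bdi,
				     struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
	if (!wb)
		wb = &bdi->wb;	/* creation failed: fall back to the root writeback */
	return wb;
}
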
595 static int cgwb_bdi_init(struct backing_dev_info *bdi) in cgwb_bdi_init() argument
599 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); in cgwb_bdi_init()
600 mutex_init(&bdi->cgwb_release_mutex); in cgwb_bdi_init()
601 init_rwsem(&bdi->wb_switch_rwsem); in cgwb_bdi_init()
603 ret = wb_init(&bdi->wb, bdi, GFP_KERNEL); in cgwb_bdi_init()
605 bdi->wb.memcg_css = &root_mem_cgroup->css; in cgwb_bdi_init()
606 bdi->wb.blkcg_css = blkcg_root_css; in cgwb_bdi_init()
611 static void cgwb_bdi_unregister(struct backing_dev_info *bdi) in cgwb_bdi_unregister() argument
617 WARN_ON(test_bit(WB_registered, &bdi->wb.state)); in cgwb_bdi_unregister()
620 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) in cgwb_bdi_unregister()
624 mutex_lock(&bdi->cgwb_release_mutex); in cgwb_bdi_unregister()
626 while (!list_empty(&bdi->wb_list)) { in cgwb_bdi_unregister()
627 wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, in cgwb_bdi_unregister()
634 mutex_unlock(&bdi->cgwb_release_mutex); in cgwb_bdi_unregister()
672 static void cgwb_bdi_register(struct backing_dev_info *bdi) in cgwb_bdi_register() argument
675 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); in cgwb_bdi_register()
696 static int cgwb_bdi_init(struct backing_dev_info *bdi) in cgwb_bdi_init() argument
698 return wb_init(&bdi->wb, bdi, GFP_KERNEL); in cgwb_bdi_init()
701 static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { } in cgwb_bdi_unregister() argument
703 static void cgwb_bdi_register(struct backing_dev_info *bdi) in cgwb_bdi_register() argument
705 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); in cgwb_bdi_register()
715 static int bdi_init(struct backing_dev_info *bdi) in bdi_init() argument
719 bdi->dev = NULL; in bdi_init()
721 kref_init(&bdi->refcnt); in bdi_init()
722 bdi->min_ratio = 0; in bdi_init()
723 bdi->max_ratio = 100; in bdi_init()
724 bdi->max_prop_frac = FPROP_FRAC_BASE; in bdi_init()
725 INIT_LIST_HEAD(&bdi->bdi_list); in bdi_init()
726 INIT_LIST_HEAD(&bdi->wb_list); in bdi_init()
727 init_waitqueue_head(&bdi->wb_waitq); in bdi_init()
729 ret = cgwb_bdi_init(bdi); in bdi_init()
736 struct backing_dev_info *bdi; in bdi_alloc() local
738 bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id); in bdi_alloc()
739 if (!bdi) in bdi_alloc()
742 if (bdi_init(bdi)) { in bdi_alloc()
743 kfree(bdi); in bdi_alloc()
746 bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT; in bdi_alloc()
747 bdi->ra_pages = VM_READAHEAD_PAGES; in bdi_alloc()
748 bdi->io_pages = VM_READAHEAD_PAGES; in bdi_alloc()
749 return bdi; in bdi_alloc()
757 struct backing_dev_info *bdi; in bdi_lookup_rb_node() local
763 bdi = rb_entry(parent, struct backing_dev_info, rb_node); in bdi_lookup_rb_node()
765 if (bdi->id > id) in bdi_lookup_rb_node()
767 else if (bdi->id < id) in bdi_lookup_rb_node()
787 struct backing_dev_info *bdi = NULL; in bdi_get_by_id() local
793 bdi = rb_entry(*p, struct backing_dev_info, rb_node); in bdi_get_by_id()
794 bdi_get(bdi); in bdi_get_by_id()
798 return bdi; in bdi_get_by_id()
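
Each registered bdi receives a monotonically increasing 64-bit id (see bdi_register_va() below) and is linked into the bdi_tree rb-tree; bdi_lookup_rb_node() walks that tree and bdi_get_by_id() returns the matching bdi with its refcount raised, or NULL. A sketch of the lookup side, assuming a previously recorded id; the in-tree caller to compare against is cgroup_writeback_by_id() in fs/fs-writeback.c, used to flush foreign-inode writeback:

#include <linux/backing-dev.h>

static void flush_recorded_bdi(u64 stored_id)
{
	struct backing_dev_info *bdi;

	bdi = bdi_get_by_id(stored_id);
	if (!bdi)
		return;			/* the device went away in the meantime */

	/* ... kick writeback against bdi->wb here ... */

	bdi_put(bdi);			/* drop the reference bdi_get_by_id() took */
}
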
801 int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) in bdi_register_va() argument
806 if (bdi->dev) /* The driver needs to use separate queues per device */ in bdi_register_va()
809 vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); in bdi_register_va()
810 dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); in bdi_register_va()
814 cgwb_bdi_register(bdi); in bdi_register_va()
815 bdi->dev = dev; in bdi_register_va()
817 bdi_debug_register(bdi, dev_name(dev)); in bdi_register_va()
818 set_bit(WB_registered, &bdi->wb.state); in bdi_register_va()
822 bdi->id = ++bdi_id_cursor; in bdi_register_va()
824 p = bdi_lookup_rb_node(bdi->id, &parent); in bdi_register_va()
825 rb_link_node(&bdi->rb_node, parent, p); in bdi_register_va()
826 rb_insert_color(&bdi->rb_node, &bdi_tree); in bdi_register_va()
828 list_add_tail_rcu(&bdi->bdi_list, &bdi_list); in bdi_register_va()
832 trace_writeback_bdi_register(bdi); in bdi_register_va()
836 int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...) in bdi_register() argument
842 ret = bdi_register_va(bdi, fmt, args); in bdi_register()
848 void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner) in bdi_set_owner() argument
850 WARN_ON_ONCE(bdi->owner); in bdi_set_owner()
851 bdi->owner = owner; in bdi_set_owner()
858 static void bdi_remove_from_list(struct backing_dev_info *bdi) in bdi_remove_from_list() argument
861 rb_erase(&bdi->rb_node, &bdi_tree); in bdi_remove_from_list()
862 list_del_rcu(&bdi->bdi_list); in bdi_remove_from_list()
868 void bdi_unregister(struct backing_dev_info *bdi) in bdi_unregister() argument
871 bdi_remove_from_list(bdi); in bdi_unregister()
872 wb_shutdown(&bdi->wb); in bdi_unregister()
873 cgwb_bdi_unregister(bdi); in bdi_unregister()
879 if (bdi->min_ratio) in bdi_unregister()
880 bdi_set_min_ratio(bdi, 0); in bdi_unregister()
882 if (bdi->dev) { in bdi_unregister()
883 bdi_debug_unregister(bdi); in bdi_unregister()
884 device_unregister(bdi->dev); in bdi_unregister()
885 bdi->dev = NULL; in bdi_unregister()
888 if (bdi->owner) { in bdi_unregister()
889 put_device(bdi->owner); in bdi_unregister()
890 bdi->owner = NULL; in bdi_unregister()
896 struct backing_dev_info *bdi = in release_bdi() local
899 if (test_bit(WB_registered, &bdi->wb.state)) in release_bdi()
900 bdi_unregister(bdi); in release_bdi()
901 WARN_ON_ONCE(bdi->dev); in release_bdi()
902 wb_exit(&bdi->wb); in release_bdi()
903 kfree(bdi); in release_bdi()
906 void bdi_put(struct backing_dev_info *bdi) in bdi_put() argument
908 kref_put(&bdi->refcnt, release_bdi); in bdi_put()
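
bdi_alloc(), bdi_register(), bdi_unregister() and bdi_put() are the whole lifecycle visible above: the kref starts at 1 in bdi_init(), and release_bdi() still unregisters a registered bdi before wb_exit() and kfree(). A minimal sketch of driving it by hand, assuming a kernel build context; in-tree users normally reach these through super_setup_bdi_name() (non-block filesystems) or the block layer rather than calling them directly, and the mydrv names are illustrative:

#include <linux/backing-dev.h>
#include <linux/err.h>

static struct backing_dev_info *mydrv_setup_bdi(int node, int instance)
{
	struct backing_dev_info *bdi;
	int err;

	bdi = bdi_alloc(node);		/* kref at 1, ra_pages preset to VM_READAHEAD_PAGES */
	if (!bdi)
		return ERR_PTR(-ENOMEM);

	err = bdi_register(bdi, "mydrv-%d", instance);	/* creates /sys/class/bdi/mydrv-<n>/ */
	if (err) {
		bdi_put(bdi);		/* drops the initial reference and frees the bdi */
		return ERR_PTR(err);
	}
	return bdi;
}

static void mydrv_teardown_bdi(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);		/* optional here: release_bdi() also unregisters */
	bdi_put(bdi);			/* final put: release_bdi() -> wb_exit() -> kfree() */
}
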
912 const char *bdi_dev_name(struct backing_dev_info *bdi) in bdi_dev_name() argument
914 if (!bdi || !bdi->dev) in bdi_dev_name()
916 return bdi->dev_name; in bdi_dev_name()
926 void clear_bdi_congested(struct backing_dev_info *bdi, int sync) in clear_bdi_congested() argument
932 if (test_and_clear_bit(bit, &bdi->wb.congested)) in clear_bdi_congested()
940 void set_bdi_congested(struct backing_dev_info *bdi, int sync) in set_bdi_congested() argument
945 if (!test_and_set_bit(bit, &bdi->wb.congested)) in set_bdi_congested()
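
set_bdi_congested() and clear_bdi_congested() just flip the WB_sync_congested/WB_async_congested bits on the root writeback, and the clear path wakes anyone sleeping in congestion_wait(). A hedged sketch of a driver using the pair, valid only as long as the kernel still provides these helpers as listed above (later kernels removed bdi congestion tracking altogether):

#include <linux/backing-dev.h>	/* also pulls in BLK_RW_SYNC / BLK_RW_ASYNC */

/*
 * Sketch: flag async-write congestion while the device backlog is
 * full and clear it once it drains. "backlogged" is the driver's own
 * notion of a full queue, not something the bdi tracks for it.
 */
static void mydrv_update_congestion(struct backing_dev_info *bdi, bool backlogged)
{
	if (backlogged)
		set_bdi_congested(bdi, BLK_RW_ASYNC);	/* sets WB_async_congested on bdi->wb */
	else
		clear_bdi_congested(bdi, BLK_RW_ASYNC);	/* also wakes congestion_wait() sleepers */
}
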