// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/*
			 * Last ref dropped with a continuation pending:
			 * reset the refcount and run cl->fn.
			 */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/*
			 * The closure is finished: run the destructor,
			 * then drop the ref held on the parent.
			 */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
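
/*
 * Illustrative sketch (not part of the original file): the point of
 * closure_sub() is that a flag bit and a reference can be dropped in a
 * single atomic operation, so there is no window where the flag is
 * cleared while the ref is still held.  __closure_wake_up() below does
 * exactly this for each parked waiter:
 *
 *	closure_sub(cl, CLOSURE_WAITING + 1);
 */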

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
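
/*
 * Hedged usage sketch (closure_get() comes from closure.h; the helper
 * below is hypothetical): a typical caller takes a ref per in-flight
 * sub-operation and each completion path drops it:
 *
 *	closure_get(cl);		// ref for the async sub-op
 *	submit_sub_operation(cl);	// hypothetical; its completion
 *					// handler calls closure_put(cl)
 *
 * When the last ref is dropped, closure_put_after_sub() above either
 * requeues cl->fn or runs the destructor.
 */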

/*
 * __closure_wake_up - wake up all closures on a wait list,
 * without a memory barrier
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
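
/*
 * Note (from closure.h, added here for context): callers normally use
 * the closure_wake_up() wrapper, which issues an smp_mb() before
 * calling __closure_wake_up() so that stores done before the wakeup
 * are ordered against the woken closures' reads.
 */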

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Return: %true if @cl was added, %false if it was already on a
 * waitlist.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
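
/*
 * Hedged usage sketch (the waitlist name is hypothetical): the classic
 * park/wake handshake pairs closure_wait() with the closure_wake_up()
 * wrapper from closure.h:
 *
 *	// waiter, running in closure context:
 *	closure_wait(&c->example_wait, cl);	// sets CLOSURE_WAITING, takes a ref
 *	closure_sync(cl);			// block until woken
 *
 *	// waker:
 *	closure_wake_up(&c->example_wait);	// drops CLOSURE_WAITING + the ref
 */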

struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

static void closure_sync_fn(struct closure *cl)
{
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}
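
/*
 * Commentary (added; the reasoning is an assumption about the original
 * intent): once s->done is observed, the waiter in __closure_sync()
 * may return and its on-stack closure_syncer can vanish, so @p is read
 * before setting s->done.  task_structs are freed via RCU, so holding
 * the RCU read lock across READ_ONCE() and wake_up_process() keeps @p
 * valid even if the waiting task exits right after seeing s->done.
 */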

void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
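
/*
 * Hedged usage sketch (closure_init_stack() comes from closure.h; the
 * async helper is hypothetical): synchronous callers park an on-stack
 * closure, kick off work that holds refs on it, then wait here:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	start_async_io(&cl);	// hypothetical; takes refs via closure_get()
 *	closure_sync(&cl);	// closure.h wrapper; calls __closure_sync()
 *				// only if refs are still outstanding
 */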

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}

static struct dentry *closure_debug;

static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug);

void __init closure_debug_init(void)
{
	if (!IS_ERR_OR_NULL(bcache_debug))
		/*
		 * No need to check the return value of
		 * debugfs_create_file(): errors from debugfs
		 * are deliberately ignored.
		 */
		closure_debug = debugfs_create_file(
			"closures", 0400, bcache_debug, NULL, &debug_fops);
}
#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");