1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2007 Oracle. All rights reserved.
4*4882a593Smuzhiyun * Copyright (C) 2014 Fujitsu. All rights reserved.
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/kthread.h>
8*4882a593Smuzhiyun #include <linux/slab.h>
9*4882a593Smuzhiyun #include <linux/list.h>
10*4882a593Smuzhiyun #include <linux/spinlock.h>
11*4882a593Smuzhiyun #include <linux/freezer.h>
12*4882a593Smuzhiyun #include "async-thread.h"
13*4882a593Smuzhiyun #include "ctree.h"
14*4882a593Smuzhiyun
/* Bits used in btrfs_work::flags to track per-work-item progress. */
enum {
	WORK_DONE_BIT,		/* set after work->func() has run (btrfs_work_helper) */
	WORK_ORDER_DONE_BIT,	/* set once the ordered function was started (run_ordered_work) */
	WORK_HIGH_PRIO_BIT,	/* route this item to the high priority queue (btrfs_queue_work) */
};

#define NO_THRESHOLD (-1)	/* concurrency thresholding disabled */
#define DFT_THRESHOLD (32)	/* default threshold when caller passes 0 */
23*4882a593Smuzhiyun
/*
 * One underlying kernel workqueue plus the btrfs bookkeeping needed for
 * ordered execution and on-demand concurrency scaling.
 */
struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variants: queued-but-not-yet-executed items */
	atomic_t pending;

	/* Up limit of concurrency workers */
	int limit_active;

	/* Current number of concurrency workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;	/* rate-limits workqueue_set_max_active calls */
	spinlock_t thres_lock;	/* protects count and current_active updates */
};
50*4882a593Smuzhiyun
/*
 * Pair of queues exposed to callers; @high is only allocated when the
 * workqueue was created with WQ_HIGHPRI (see btrfs_alloc_workqueue).
 */
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};
55*4882a593Smuzhiyun
/* Return the filesystem that @wq services. */
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}
60*4882a593Smuzhiyun
/* Return the filesystem of the workqueue that @work was queued on. */
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}
65*4882a593Smuzhiyun
btrfs_workqueue_normal_congested(const struct btrfs_workqueue * wq)66*4882a593Smuzhiyun bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun /*
69*4882a593Smuzhiyun * We could compare wq->normal->pending with num_online_cpus()
70*4882a593Smuzhiyun * to support "thresh == NO_THRESHOLD" case, but it requires
71*4882a593Smuzhiyun * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
72*4882a593Smuzhiyun * postpone it until someone needs the support of that case.
73*4882a593Smuzhiyun */
74*4882a593Smuzhiyun if (wq->normal->thresh == NO_THRESHOLD)
75*4882a593Smuzhiyun return false;
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info * fs_info,const char * name,unsigned int flags,int limit_active,int thresh)81*4882a593Smuzhiyun __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
82*4882a593Smuzhiyun unsigned int flags, int limit_active, int thresh)
83*4882a593Smuzhiyun {
84*4882a593Smuzhiyun struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun if (!ret)
87*4882a593Smuzhiyun return NULL;
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun ret->fs_info = fs_info;
90*4882a593Smuzhiyun ret->limit_active = limit_active;
91*4882a593Smuzhiyun atomic_set(&ret->pending, 0);
92*4882a593Smuzhiyun if (thresh == 0)
93*4882a593Smuzhiyun thresh = DFT_THRESHOLD;
94*4882a593Smuzhiyun /* For low threshold, disabling threshold is a better choice */
95*4882a593Smuzhiyun if (thresh < DFT_THRESHOLD) {
96*4882a593Smuzhiyun ret->current_active = limit_active;
97*4882a593Smuzhiyun ret->thresh = NO_THRESHOLD;
98*4882a593Smuzhiyun } else {
99*4882a593Smuzhiyun /*
100*4882a593Smuzhiyun * For threshold-able wq, let its concurrency grow on demand.
101*4882a593Smuzhiyun * Use minimal max_active at alloc time to reduce resource
102*4882a593Smuzhiyun * usage.
103*4882a593Smuzhiyun */
104*4882a593Smuzhiyun ret->current_active = 1;
105*4882a593Smuzhiyun ret->thresh = thresh;
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun if (flags & WQ_HIGHPRI)
109*4882a593Smuzhiyun ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
110*4882a593Smuzhiyun ret->current_active, name);
111*4882a593Smuzhiyun else
112*4882a593Smuzhiyun ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
113*4882a593Smuzhiyun ret->current_active, name);
114*4882a593Smuzhiyun if (!ret->normal_wq) {
115*4882a593Smuzhiyun kfree(ret);
116*4882a593Smuzhiyun return NULL;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun INIT_LIST_HEAD(&ret->ordered_list);
120*4882a593Smuzhiyun spin_lock_init(&ret->list_lock);
121*4882a593Smuzhiyun spin_lock_init(&ret->thres_lock);
122*4882a593Smuzhiyun trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
123*4882a593Smuzhiyun return ret;
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun static inline void
127*4882a593Smuzhiyun __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
128*4882a593Smuzhiyun
btrfs_alloc_workqueue(struct btrfs_fs_info * fs_info,const char * name,unsigned int flags,int limit_active,int thresh)129*4882a593Smuzhiyun struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
130*4882a593Smuzhiyun const char *name,
131*4882a593Smuzhiyun unsigned int flags,
132*4882a593Smuzhiyun int limit_active,
133*4882a593Smuzhiyun int thresh)
134*4882a593Smuzhiyun {
135*4882a593Smuzhiyun struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun if (!ret)
138*4882a593Smuzhiyun return NULL;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun ret->normal = __btrfs_alloc_workqueue(fs_info, name,
141*4882a593Smuzhiyun flags & ~WQ_HIGHPRI,
142*4882a593Smuzhiyun limit_active, thresh);
143*4882a593Smuzhiyun if (!ret->normal) {
144*4882a593Smuzhiyun kfree(ret);
145*4882a593Smuzhiyun return NULL;
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun if (flags & WQ_HIGHPRI) {
149*4882a593Smuzhiyun ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
150*4882a593Smuzhiyun limit_active, thresh);
151*4882a593Smuzhiyun if (!ret->high) {
152*4882a593Smuzhiyun __btrfs_destroy_workqueue(ret->normal);
153*4882a593Smuzhiyun kfree(ret);
154*4882a593Smuzhiyun return NULL;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun return ret;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun /*
161*4882a593Smuzhiyun * Hook for threshold which will be called in btrfs_queue_work.
162*4882a593Smuzhiyun * This hook WILL be called in IRQ handler context,
163*4882a593Smuzhiyun * so workqueue_set_max_active MUST NOT be called in this hook
164*4882a593Smuzhiyun */
thresh_queue_hook(struct __btrfs_workqueue * wq)165*4882a593Smuzhiyun static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
166*4882a593Smuzhiyun {
167*4882a593Smuzhiyun if (wq->thresh == NO_THRESHOLD)
168*4882a593Smuzhiyun return;
169*4882a593Smuzhiyun atomic_inc(&wq->pending);
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * is safe to call here.
 */
thresh_exec_hook(struct __btrfs_workqueue * wq)177*4882a593Smuzhiyun static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun int new_current_active;
180*4882a593Smuzhiyun long pending;
181*4882a593Smuzhiyun int need_change = 0;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun if (wq->thresh == NO_THRESHOLD)
184*4882a593Smuzhiyun return;
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun atomic_dec(&wq->pending);
187*4882a593Smuzhiyun spin_lock(&wq->thres_lock);
188*4882a593Smuzhiyun /*
189*4882a593Smuzhiyun * Use wq->count to limit the calling frequency of
190*4882a593Smuzhiyun * workqueue_set_max_active.
191*4882a593Smuzhiyun */
192*4882a593Smuzhiyun wq->count++;
193*4882a593Smuzhiyun wq->count %= (wq->thresh / 4);
194*4882a593Smuzhiyun if (!wq->count)
195*4882a593Smuzhiyun goto out;
196*4882a593Smuzhiyun new_current_active = wq->current_active;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun /*
199*4882a593Smuzhiyun * pending may be changed later, but it's OK since we really
200*4882a593Smuzhiyun * don't need it so accurate to calculate new_max_active.
201*4882a593Smuzhiyun */
202*4882a593Smuzhiyun pending = atomic_read(&wq->pending);
203*4882a593Smuzhiyun if (pending > wq->thresh)
204*4882a593Smuzhiyun new_current_active++;
205*4882a593Smuzhiyun if (pending < wq->thresh / 2)
206*4882a593Smuzhiyun new_current_active--;
207*4882a593Smuzhiyun new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
208*4882a593Smuzhiyun if (new_current_active != wq->current_active) {
209*4882a593Smuzhiyun need_change = 1;
210*4882a593Smuzhiyun wq->current_active = new_current_active;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun out:
213*4882a593Smuzhiyun spin_unlock(&wq->thres_lock);
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun if (need_change) {
216*4882a593Smuzhiyun workqueue_set_max_active(wq->normal_wq, wq->current_active);
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun
/*
 * Run the ordered functions of completed work items, in queueing order.
 *
 * Called from btrfs_work_helper after the normal work function of @self
 * has finished.  Walks wq->ordered_list from the front, calling
 * ordered_func for every item whose WORK_DONE_BIT is set, and stops at
 * the first unfinished item so ordering is preserved.  @self must not
 * be freed until we are completely done with it (see the recycling
 * comment below).
 */
static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT,
		 * paired with the smp_mb__before_atomic in btrfs_work_helper
		 * this guarantees that the ordered function will see all
		 * updates from ordinary work function.
		 */
		smp_rmb();

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}
303*4882a593Smuzhiyun
/*
 * Work function wrapper executed by the kernel workqueue for every
 * btrfs_work (installed by btrfs_init_work).  Runs the work's main
 * function; for ordered work it then publishes completion via
 * WORK_DONE_BIT and drives the ordered list.
 */
static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT. Ensuring the thread
		 * which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}
341*4882a593Smuzhiyun
/*
 * Initialize a btrfs_work before queueing it.  @ordered_func and
 * @ordered_free may be NULL for plain (unordered) work.
 */
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	work->flags = 0;
}
352*4882a593Smuzhiyun
/*
 * Queue @work on the given underlying workqueue.  If the work has an
 * ordered function it is also linked onto wq->ordered_list so that
 * run_ordered_work can execute the ordered functions in queueing
 * order.  Uses the irqsave lock variants; per the comment above
 * thresh_queue_hook, this path may run in IRQ handler context.
 */
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	/* Account the item for on-demand concurrency scaling. */
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}
368*4882a593Smuzhiyun
btrfs_queue_work(struct btrfs_workqueue * wq,struct btrfs_work * work)369*4882a593Smuzhiyun void btrfs_queue_work(struct btrfs_workqueue *wq,
370*4882a593Smuzhiyun struct btrfs_work *work)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun struct __btrfs_workqueue *dest_wq;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
375*4882a593Smuzhiyun dest_wq = wq->high;
376*4882a593Smuzhiyun else
377*4882a593Smuzhiyun dest_wq = wq->normal;
378*4882a593Smuzhiyun __btrfs_queue_work(dest_wq, work);
379*4882a593Smuzhiyun }
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue * wq)382*4882a593Smuzhiyun __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
383*4882a593Smuzhiyun {
384*4882a593Smuzhiyun destroy_workqueue(wq->normal_wq);
385*4882a593Smuzhiyun trace_btrfs_workqueue_destroy(wq);
386*4882a593Smuzhiyun kfree(wq);
387*4882a593Smuzhiyun }
388*4882a593Smuzhiyun
/*
 * Destroy the normal queue and, if it was allocated, the high priority
 * queue, then free the pair.  NULL-safe.
 */
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}
398*4882a593Smuzhiyun
/*
 * Update the concurrency upper bound for both queues.  NULL-safe.
 * The new limit is picked up the next time thresh_exec_hook re-clamps
 * current_active.
 */
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}
407*4882a593Smuzhiyun
/* Mark @work so btrfs_queue_work routes it to the high priority queue. */
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
412*4882a593Smuzhiyun
btrfs_flush_workqueue(struct btrfs_workqueue * wq)413*4882a593Smuzhiyun void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
414*4882a593Smuzhiyun {
415*4882a593Smuzhiyun if (wq->high)
416*4882a593Smuzhiyun flush_workqueue(wq->high->normal_wq);
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun flush_workqueue(wq->normal->normal_wq);
419*4882a593Smuzhiyun }
420