/*
 * Copyright (C) 2017 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "dm-background-tracker"
12*4882a593Smuzhiyun
/*
 * A single tracked piece of background work (promote, demote or
 * writeback).  Each item lives simultaneously on one of the tracker's
 * lists (queued or issued) and in its pending rb-tree.
 */
struct bt_work {
	struct list_head list;		/* entry on tracker's queued or issued list */
	struct rb_node node;		/* entry in tracker's pending tree, keyed by oblock */
	struct policy_work work;	/* the work itself; its address is handed out to callers */
};
18*4882a593Smuzhiyun
/*
 * Tracks background work on behalf of a cache policy, bounding the total
 * amount in flight and preventing duplicate work for the same oblock.
 */
struct background_tracker {
	unsigned max_work;		/* cap on total items tracked at once */
	atomic_t pending_promotes;	/* count of tracked POLICY_PROMOTE items */
	atomic_t pending_writebacks;	/* count of tracked POLICY_WRITEBACK items */
	atomic_t pending_demotes;	/* count of tracked POLICY_DEMOTE items */

	struct list_head issued;	/* work handed out to the core target */
	struct list_head queued;	/* work accepted but not yet issued */
	struct rb_root pending;		/* all tracked work, indexed by oblock */

	struct kmem_cache *work_cache;	/* allocator for struct bt_work */
};
31*4882a593Smuzhiyun
btracker_create(unsigned max_work)32*4882a593Smuzhiyun struct background_tracker *btracker_create(unsigned max_work)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun if (!b) {
37*4882a593Smuzhiyun DMERR("couldn't create background_tracker");
38*4882a593Smuzhiyun return NULL;
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun b->max_work = max_work;
42*4882a593Smuzhiyun atomic_set(&b->pending_promotes, 0);
43*4882a593Smuzhiyun atomic_set(&b->pending_writebacks, 0);
44*4882a593Smuzhiyun atomic_set(&b->pending_demotes, 0);
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun INIT_LIST_HEAD(&b->issued);
47*4882a593Smuzhiyun INIT_LIST_HEAD(&b->queued);
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun b->pending = RB_ROOT;
50*4882a593Smuzhiyun b->work_cache = KMEM_CACHE(bt_work, 0);
51*4882a593Smuzhiyun if (!b->work_cache) {
52*4882a593Smuzhiyun DMERR("couldn't create mempool for background work items");
53*4882a593Smuzhiyun kfree(b);
54*4882a593Smuzhiyun b = NULL;
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun return b;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btracker_create);
60*4882a593Smuzhiyun
btracker_destroy(struct background_tracker * b)61*4882a593Smuzhiyun void btracker_destroy(struct background_tracker *b)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun kmem_cache_destroy(b->work_cache);
64*4882a593Smuzhiyun kfree(b);
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btracker_destroy);
67*4882a593Smuzhiyun
/*
 * Three-way compare of two origin blocks, in the manner of memcmp():
 * negative when lhs < rhs, positive when lhs > rhs, zero when equal.
 */
static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
{
	return from_oblock(lhs) < from_oblock(rhs) ? -1 :
	       (from_oblock(rhs) < from_oblock(lhs) ? 1 : 0);
}
78*4882a593Smuzhiyun
/*
 * Insert @nw into the pending rb-tree, keyed by oblock.
 *
 * Returns false (without inserting) if work for the same oblock is
 * already present.
 *
 * NOTE: the walk descends left when the existing node compares *less*
 * than the new one, i.e. the tree is ordered in the mirror of
 * cmp_oblock().  __find_pending() uses the identical convention, so
 * lookups and insertions agree.
 */
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		parent = *new;
		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	/* link the new node under the leaf slot we found, then rebalance */
	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}
107*4882a593Smuzhiyun
/*
 * Look up the pending work item for @oblock.
 *
 * Returns the bt_work tracking that block, or NULL if no work for it is
 * currently tracked.  Uses the same mirrored ordering convention as
 * __insert_pending().
 */
static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	struct rb_node *node = b->pending.rb_node;

	while (node) {
		struct bt_work *w = container_of(node, struct bt_work, node);
		int cmp = cmp_oblock(w->work.oblock, oblock);

		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return w;
	}

	return NULL;
}
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun
/*
 * Adjust the per-operation pending counter for @w by @delta
 * (+1 when tracking new work, -1 when work completes).
 */
static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
{
	switch (w->op) {
	case POLICY_PROMOTE:
		atomic_add(delta, &b->pending_promotes);
		break;

	case POLICY_DEMOTE:
		atomic_add(delta, &b->pending_demotes);
		break;

	case POLICY_WRITEBACK:
		atomic_add(delta, &b->pending_writebacks);
		break;
	}
}
149*4882a593Smuzhiyun
/*
 * Number of POLICY_WRITEBACK items currently tracked (queued + issued).
 */
unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
155*4882a593Smuzhiyun
/*
 * Number of POLICY_DEMOTE items currently tracked (queued + issued).
 */
unsigned btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
161*4882a593Smuzhiyun
max_work_reached(struct background_tracker * b)162*4882a593Smuzhiyun static bool max_work_reached(struct background_tracker *b)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun return atomic_read(&b->pending_promotes) +
165*4882a593Smuzhiyun atomic_read(&b->pending_writebacks) +
166*4882a593Smuzhiyun atomic_read(&b->pending_demotes) >= b->max_work;
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun
/*
 * Allocate a new work item, refusing once the tracker is at its
 * max_work limit.  Returns NULL on refusal or allocation failure.
 *
 * GFP_NOWAIT: presumably because this runs on the IO path and must not
 * sleep — NOTE(review): confirm against callers.
 */
static struct bt_work *alloc_work(struct background_tracker *b)
{
	if (max_work_reached(b))
		return NULL;

	return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
}
176*4882a593Smuzhiyun
btracker_queue(struct background_tracker * b,struct policy_work * work,struct policy_work ** pwork)177*4882a593Smuzhiyun int btracker_queue(struct background_tracker *b,
178*4882a593Smuzhiyun struct policy_work *work,
179*4882a593Smuzhiyun struct policy_work **pwork)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun struct bt_work *w;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun if (pwork)
184*4882a593Smuzhiyun *pwork = NULL;
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun w = alloc_work(b);
187*4882a593Smuzhiyun if (!w)
188*4882a593Smuzhiyun return -ENOMEM;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun memcpy(&w->work, work, sizeof(*work));
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun if (!__insert_pending(b, w)) {
193*4882a593Smuzhiyun /*
194*4882a593Smuzhiyun * There was a race, we'll just ignore this second
195*4882a593Smuzhiyun * bit of work for the same oblock.
196*4882a593Smuzhiyun */
197*4882a593Smuzhiyun kmem_cache_free(b->work_cache, w);
198*4882a593Smuzhiyun return -EINVAL;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun if (pwork) {
202*4882a593Smuzhiyun *pwork = &w->work;
203*4882a593Smuzhiyun list_add(&w->list, &b->issued);
204*4882a593Smuzhiyun } else
205*4882a593Smuzhiyun list_add(&w->list, &b->queued);
206*4882a593Smuzhiyun update_stats(b, &w->work, 1);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun return 0;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btracker_queue);
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun /*
213*4882a593Smuzhiyun * Returns -ENODATA if there's no work.
214*4882a593Smuzhiyun */
btracker_issue(struct background_tracker * b,struct policy_work ** work)215*4882a593Smuzhiyun int btracker_issue(struct background_tracker *b, struct policy_work **work)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun struct bt_work *w;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun if (list_empty(&b->queued))
220*4882a593Smuzhiyun return -ENODATA;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun w = list_first_entry(&b->queued, struct bt_work, list);
223*4882a593Smuzhiyun list_move(&w->list, &b->issued);
224*4882a593Smuzhiyun *work = &w->work;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun return 0;
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btracker_issue);
229*4882a593Smuzhiyun
/*
 * Stop tracking a piece of work.  @op must be a policy_work pointer
 * previously handed out by this tracker (btracker_queue() or
 * btracker_issue()); it is converted back to its containing bt_work.
 *
 * Removes the item from the pending tree and from whichever list it is
 * on, updates the per-op counter, and frees it.
 */
void btracker_complete(struct background_tracker *b,
		       struct policy_work *op)
{
	struct bt_work *w = container_of(op, struct bt_work, work);

	update_stats(b, &w->work, -1);
	rb_erase(&w->node, &b->pending);
	list_del(&w->list);
	kmem_cache_free(b->work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);
241*4882a593Smuzhiyun
/*
 * True if any background work for @oblock is currently tracked.
 *
 * NOTE(review): despite the name, the pending tree is keyed purely by
 * oblock, so this matches work of any op type, not only promotions.
 */
bool btracker_promotion_already_present(struct background_tracker *b,
					dm_oblock_t oblock)
{
	return __find_pending(b, oblock) != NULL;
}
EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
248*4882a593Smuzhiyun
/*----------------------------------------------------------------*/