/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"
/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned safe_div(unsigned n, unsigned d)
{
	return d ? n / d : 0u;
}

static unsigned safe_mod(unsigned n, unsigned d)
{
	return d ? n % d : 0u;
}

/*----------------------------------------------------------------*/
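/*
 * All entries live in one preallocated array (see struct entry_space
 * below) and refer to one another with 28-bit indices rather than
 * pointers, which keeps each entry small.
 */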
struct entry {
	unsigned hash_next:28;
	unsigned prev:28;
	unsigned next:28;
	unsigned level:6;
	bool dirty:1;
	bool allocated:1;
	bool sentinel:1;
	bool pending_work:1;

	dm_oblock_t oblock;
};

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)

/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static int space_init(struct entry_space *es, unsigned nr_entries)
{
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
	if (!es->begin)
		return -ENOMEM;

	es->end = es->begin + nr_entries;
	return 0;
}

static void space_exit(struct entry_space *es)
{
	vfree(es->begin);
}

static struct entry *__get_entry(struct entry_space *es, unsigned block)
{
	struct entry *e;

	e = es->begin + block;
	BUG_ON(e >= es->end);

	return e;
}

static unsigned to_index(struct entry_space *es, struct entry *e)
{
	BUG_ON(e < es->begin || e >= es->end);
	return e - es->begin;
}

static struct entry *to_entry(struct entry_space *es, unsigned block)
{
	if (block == INDEXER_NULL)
		return NULL;

	return __get_entry(es, block);
}

/*----------------------------------------------------------------*/
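/*
 * An index-based doubly linked list. nr_elts counts real entries only;
 * sentinel entries may sit on the list without affecting the count.
 */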
struct ilist {
	unsigned nr_elts;	/* excluding sentinel entries */
	unsigned head, tail;
};

static void l_init(struct ilist *l)
{
	l->nr_elts = 0;
	l->head = l->tail = INDEXER_NULL;
}

static struct entry *l_head(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->head);
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->tail);
}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->next);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->prev);
}

static bool l_empty(struct ilist *l)
{
	return l->head == INDEXER_NULL;
}

static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *head = l_head(es, l);

	e->next = l->head;
	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *tail = l_tail(es, l);

	e->next = INDEXER_NULL;
	e->prev = l->tail;

	if (tail)
		tail->next = l->tail = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
{
	struct entry *prev = l_prev(es, old);

	if (!prev)
		l_add_head(es, l, e);

	else {
		e->prev = old->prev;
		e->next = to_index(es, old);
		prev->next = old->prev = to_index(es, e);

		if (!e->sentinel)
			l->nr_elts++;
	}
}

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *prev = l_prev(es, e);
	struct entry *next = l_next(es, e);

	if (prev)
		prev->next = e->next;
	else
		l->head = e->next;

	if (next)
		next->prev = e->prev;
	else
		l->tail = e->prev;

	if (!e->sentinel)
		l->nr_elts--;
}
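/*
 * Remove and return the first non-sentinel entry, searching from the
 * head of the list.
 */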
static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_head(es, l); e; e = l_next(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}
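/*
 * As l_pop_head(), but searches from the tail.
 */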
static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_tail(es, l); e; e = l_prev(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of lru lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders the
 * most accessed entries in the top levels and least in the bottom. This
 * structure is *much* better than a single lru list.
 */
#define MAX_LEVELS 64u

struct queue {
	struct entry_space *es;

	unsigned nr_elts;
	unsigned nr_levels;
	struct ilist qs[MAX_LEVELS];

	/*
	 * We maintain a count of the number of entries we would like in each
	 * level.
	 */
	unsigned last_target_nr_elts;
	unsigned nr_top_levels;
	unsigned nr_in_top_levels;
	unsigned target_count[MAX_LEVELS];
};

static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
{
	unsigned i;

	q->es = es;
	q->nr_elts = 0;
	q->nr_levels = nr_levels;

	for (i = 0; i < q->nr_levels; i++) {
		l_init(q->qs + i);
		q->target_count[i] = 0u;
	}

	q->last_target_nr_elts = 0u;
	q->nr_top_levels = 0u;
	q->nr_in_top_levels = 0u;
}

static unsigned q_size(struct queue *q)
{
	return q->nr_elts;
}

/*
 * Insert an entry to the back of the given level.
 */
static void q_push(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_tail(q->es, q->qs + e->level, e);
}

static void q_push_front(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_head(q->es, q->qs + e->level, e);
}

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_before(q->es, q->qs + e->level, old, e);
}

static void q_del(struct queue *q, struct entry *e)
{
	l_del(q->es, q->qs + e->level, e);
	if (!e->sentinel)
		q->nr_elts--;
}

/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
{
	unsigned level;
	struct entry *e;

	max_level = min(max_level, q->nr_levels);

	for (level = 0; level < max_level; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (e->sentinel) {
				if (can_cross_sentinel)
					continue;
				else
					break;
			}

			return e;
		}

	return NULL;
}

static struct entry *q_pop(struct queue *q)
{
	struct entry *e = q_peek(q, q->nr_levels, true);

	if (e)
		q_del(q, e);

	return e;
}

/*
 * This function assumes there is a non-sentinel entry to pop. It's only
 * used by redistribute, so we know this is true. It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned level)
{
	struct entry *e;

	for (; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
			if (!e->sentinel) {
				l_del(q->es, q->qs + e->level, e);
				return e;
			}

	return NULL;
}
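/*
 * Divide nr_elts evenly between the levels [lbegin, lend), giving the
 * remainder, one each, to the lower levels in the range.
 */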
static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
{
	unsigned level, nr_levels, entries_per_level, remainder;

	BUG_ON(lbegin > lend);
	BUG_ON(lend > q->nr_levels);
	nr_levels = lend - lbegin;
	entries_per_level = safe_div(nr_elts, nr_levels);
	remainder = safe_mod(nr_elts, nr_levels);

	for (level = lbegin; level < lend; level++)
		q->target_count[level] =
			(level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
}

/*
 * Typically we have fewer elements in the top few levels which allows us
 * to adjust the promote threshold nicely.
 */
static void q_set_targets(struct queue *q)
{
	if (q->last_target_nr_elts == q->nr_elts)
		return;

	q->last_target_nr_elts = q->nr_elts;

	if (q->nr_top_levels > q->nr_levels)
		q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

	else {
		q_set_targets_subrange_(q, q->nr_in_top_levels,
					q->nr_levels - q->nr_top_levels, q->nr_levels);

		if (q->nr_in_top_levels < q->nr_elts)
			q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
						0, q->nr_levels - q->nr_top_levels);
		else
			q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
	}
}

static void q_redistribute(struct queue *q)
{
	unsigned target, level;
	struct ilist *l, *l_above;
	struct entry *e;

	q_set_targets(q);

	for (level = 0u; level < q->nr_levels - 1u; level++) {
		l = q->qs + level;
		target = q->target_count[level];

		/*
		 * Pull down some entries from the level above.
		 */
		while (l->nr_elts < target) {
			e = __redist_pop_from(q, level + 1u);
			if (!e) {
				/* bug in nr_elts */
				break;
			}

			e->level = level;
			l_add_tail(q->es, l, e);
		}

		/*
		 * Push some entries up.
		 */
		l_above = q->qs + level + 1u;
		while (l->nr_elts > target) {
			e = l_pop_tail(q->es, l);

			if (!e)
				/* bug in nr_elts */
				break;

			e->level = level + 1u;
			l_add_tail(q->es, l_above, e);
		}
	}
}
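/*
 * Promote e by extra_levels. To keep the level populations stable we
 * swap: the first non-sentinel entry at the destination level is moved
 * down to e's old level. If sentinels s1/s2 are supplied, the displaced
 * entry is reinserted before whichever sentinel it had already passed,
 * keeping its position relative to them.
 */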
static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
		      struct entry *s1, struct entry *s2)
{
	struct entry *de;
	unsigned sentinels_passed = 0;
	unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);

	/* try and find an entry to swap with */
	if (extra_levels && (e->level < q->nr_levels - 1u)) {
		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
			sentinels_passed++;

		if (de) {
			q_del(q, de);
			de->level = e->level;
			if (s1) {
				switch (sentinels_passed) {
				case 0:
					q_push_before(q, s1, de);
					break;

				case 1:
					q_push_before(q, s2, de);
					break;

				default:
					q_push(q, de);
				}
			} else
				q_push(q, de);
		}
	}

	q_del(q, e);
	e->level = new_level;
	q_push(q, e);
}

/*----------------------------------------------------------------*/

#define FP_SHIFT 8
#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))
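/*
 * Fixed point arithmetic with FP_SHIFT fractional bits: SIXTEENTH and
 * EIGHTH are 1/16 and 1/8 in this representation.
 */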
struct stats {
	unsigned hit_threshold;
	unsigned hits;
	unsigned misses;
};

enum performance {
	Q_POOR,
	Q_FAIR,
	Q_WELL
};

static void stats_init(struct stats *s, unsigned nr_levels)
{
	s->hit_threshold = (nr_levels * 3u) / 4u;
	s->hits = 0u;
	s->misses = 0u;
}

static void stats_reset(struct stats *s)
{
	s->hits = s->misses = 0u;
}

static void stats_level_accessed(struct stats *s, unsigned level)
{
	if (level >= s->hit_threshold)
		s->hits++;
	else
		s->misses++;
}

static void stats_miss(struct stats *s)
{
	s->misses++;
}

/*
 * There are times when we don't have any confidence in the hotspot queue.
 * Such as when a fresh cache is created and the blocks have been spread
 * out across the levels, or if the I/O load changes. We detect this by
 * seeing how often a lookup is in the top levels of the hotspot queue.
 */
static enum performance stats_assess(struct stats *s)
{
	unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

	if (confidence < SIXTEENTH)
		return Q_POOR;

	else if (confidence < EIGHTH)
		return Q_FAIR;

	else
		return Q_WELL;
}

/*----------------------------------------------------------------*/

struct smq_hash_table {
	struct entry_space *es;
	unsigned long long hash_bits;
	unsigned *buckets;
};

/*
 * All cache entries are stored in a chained hash table. To save space we
 * use indexing again, and only store indexes to the next entry.
 */
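/*
 * The table is sized at roughly one bucket per four entries, with a
 * minimum of 16; nr_buckets is a power of two, so __ffs() yields its
 * log2 for use as the hash width.
 */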
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
{
	unsigned i, nr_buckets;

	ht->es = es;
	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
	ht->hash_bits = __ffs(nr_buckets);

	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
	if (!ht->buckets)
		return -ENOMEM;

	for (i = 0; i < nr_buckets; i++)
		ht->buckets[i] = INDEXER_NULL;

	return 0;
}

static void h_exit(struct smq_hash_table *ht)
{
	vfree(ht->buckets);
}

static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
{
	return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
	return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
{
	e->hash_next = ht->buckets[bucket];
	ht->buckets[bucket] = to_index(ht->es, e);
}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
	__h_insert(ht, h, e);
}

static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
				struct entry **prev)
{
	struct entry *e;

	*prev = NULL;
	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
		if (e->oblock == oblock)
			return e;

		*prev = e;
	}

	return NULL;
}

static void __h_unlink(struct smq_hash_table *ht, unsigned h,
		       struct entry *e, struct entry *prev)
{
	if (prev)
		prev->hash_next = e->hash_next;
	else
		ht->buckets[h] = e->hash_next;
}

/*
 * Also moves each entry to the front of the bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
	struct entry *e, *prev;
	unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);

	e = __h_lookup(ht, h, oblock, &prev);
	if (e && prev) {
		/*
		 * Move to the front because this entry is likely
		 * to be hit again.
		 */
		__h_unlink(ht, h, e, prev);
		__h_insert(ht, h, e);
	}

	return e;
}

static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
	struct entry *prev;

	/*
	 * The down side of using a singly linked list is we have to
	 * iterate the bucket to remove an item.
	 */
	e = __h_lookup(ht, h, e->oblock, &prev);
	if (e)
		__h_unlink(ht, h, e, prev);
}

/*----------------------------------------------------------------*/
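/*
 * The entry allocator manages a contiguous chunk of the entry_space,
 * handing entries out from a free list. 'begin' lets us convert between
 * an entry and its index within the chunk.
 */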
struct entry_alloc {
	struct entry_space *es;
	unsigned begin;

	unsigned nr_allocated;
	struct ilist free;
};

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned begin, unsigned end)
{
	unsigned i;

	ea->es = es;
	ea->nr_allocated = 0u;
	ea->begin = begin;

	l_init(&ea->free);
	for (i = begin; i != end; i++)
		l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
}

static void init_entry(struct entry *e)
{
	/*
	 * We can't memset because that would clear the hotspot and
	 * sentinel bits which remain constant.
	 */
	e->hash_next = INDEXER_NULL;
	e->next = INDEXER_NULL;
	e->prev = INDEXER_NULL;
	e->level = 0u;
	e->dirty = true;	/* FIXME: audit */
	e->allocated = true;
	e->sentinel = false;
	e->pending_work = false;
}

static struct entry *alloc_entry(struct entry_alloc *ea)
{
	struct entry *e;

	if (l_empty(&ea->free))
		return NULL;

	e = l_pop_head(ea->es, &ea->free);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
{
	struct entry *e = __get_entry(ea->es, ea->begin + i);

	BUG_ON(e->allocated);

	l_del(ea->es, &ea->free, e);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

static void free_entry(struct entry_alloc *ea, struct entry *e)
{
	BUG_ON(!ea->nr_allocated);
	BUG_ON(!e->allocated);

	ea->nr_allocated--;
	e->allocated = false;
	l_add_tail(ea->es, &ea->free, e);
}

static bool allocator_empty(struct entry_alloc *ea)
{
	return l_empty(&ea->free);
}

static unsigned get_index(struct entry_alloc *ea, struct entry *e)
{
	return to_index(ea->es, e) - ea->begin;
}

static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
{
	return __get_entry(ea->es, ea->begin + index);
}

/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)

struct smq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	spinlock_t lock;
	dm_cblock_t cache_size;
	sector_t cache_block_size;

	sector_t hotspot_block_size;
	unsigned nr_hotspot_blocks;
	unsigned cache_blocks_per_hotspot_block;
	unsigned hotspot_level_jump;

	struct entry_space es;
	struct entry_alloc writeback_sentinel_alloc;
	struct entry_alloc demote_sentinel_alloc;
	struct entry_alloc hotspot_alloc;
	struct entry_alloc cache_alloc;

	unsigned long *hotspot_hit_bits;
	unsigned long *cache_hit_bits;

	/*
	 * We maintain three queues of entries. The cache proper,
	 * consisting of a clean and dirty queue, containing the currently
	 * active mappings. The hotspot queue uses a larger block size to
	 * track blocks that are being hit frequently and potential
	 * candidates for promotion to the cache.
	 */
	struct queue hotspot;
	struct queue clean;
	struct queue dirty;

	struct stats hotspot_stats;
	struct stats cache_stats;

	/*
	 * Keeps track of time, incremented by the core. We use this to
	 * avoid attributing multiple hits within the same tick.
	 */
	unsigned tick;

	/*
	 * The hash tables allow us to quickly find an entry by origin
	 * block.
	 */
	struct smq_hash_table table;
	struct smq_hash_table hotspot_table;

	bool current_writeback_sentinels;
	unsigned long next_writeback_period;

	bool current_demote_sentinels;
	unsigned long next_demote_period;

	unsigned write_promote_level;
	unsigned read_promote_level;

	unsigned long next_hotspot_period;
	unsigned long next_cache_period;

	struct background_tracker *bg_work;

	bool migrations_allowed;
};

/*----------------------------------------------------------------*/
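/*
 * Each sentinel allocator holds two banks of NR_CACHE_LEVELS entries;
 * 'which' selects between the current and previous bank.
 */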
static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
{
	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}

static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
{
	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
{
	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}

static void __update_writeback_sentinels(struct smq_policy *mq)
{
	unsigned level;
	struct queue *q = &mq->dirty;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = writeback_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void __update_demote_sentinels(struct smq_policy *mq)
{
	unsigned level;
	struct queue *q = &mq->clean;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = demote_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void update_sentinels(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_writeback_period)) {
		mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
		mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
		__update_writeback_sentinels(mq);
	}

	if (time_after(jiffies, mq->next_demote_period)) {
		mq->next_demote_period = jiffies + DEMOTE_PERIOD;
		mq->current_demote_sentinels = !mq->current_demote_sentinels;
		__update_demote_sentinels(mq);
	}
}

static void __sentinels_init(struct smq_policy *mq)
{
	unsigned level;
	struct entry *sentinel;

	for (level = 0; level < NR_CACHE_LEVELS; level++) {
		sentinel = writeback_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->dirty, sentinel);

		sentinel = demote_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->clean, sentinel);
	}
}
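/*
 * Push both banks of sentinels onto the queues by initialising once
 * with each polarity of the current_*_sentinels flags.
 */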
static void sentinels_init(struct smq_policy *mq)
{
	mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
	mq->next_demote_period = jiffies + DEMOTE_PERIOD;

	mq->current_writeback_sentinels = false;
	mq->current_demote_sentinels = false;
	__sentinels_init(mq);

	mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
	mq->current_demote_sentinels = !mq->current_demote_sentinels;
	__sentinels_init(mq);
}

/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
{
	q_del(e->dirty ? &mq->dirty : &mq->clean, e);
}

static void push_queue(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push(&mq->dirty, e);
	else
		q_push(&mq->clean, e);
}

// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue(mq, e);
}

static void push_queue_front(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push_front(&mq->dirty, e);
	else
		q_push_front(&mq->clean, e);
}

static void push_front(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue_front(mq, e);
}

static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{
	return to_cblock(get_index(&mq->cache_alloc, e));
}
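/*
 * Requeue an entry at most once per cache period (tracked via
 * cache_hit_bits). Dirty entries are reinserted relative to the
 * writeback sentinels so their writeback age is preserved.
 */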
static void requeue(struct smq_policy *mq, struct entry *e)
{
	/*
	 * Pending work has temporarily been taken out of the queues.
	 */
	if (e->pending_work)
		return;

	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
		if (!e->dirty) {
			q_requeue(&mq->clean, e, 1u, NULL, NULL);
			return;
		}

		q_requeue(&mq->dirty, e, 1u,
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
	}
}

static unsigned default_promote_level(struct smq_policy *mq)
{
	/*
	 * The promote level depends on the current performance of the
	 * cache.
	 *
	 * If the cache is performing badly, then we can't afford
	 * to promote much without causing performance to drop below that
	 * of the origin device.
	 *
	 * If the cache is performing well, then we don't need to promote
	 * much. If it isn't broken, don't fix it.
	 *
	 * If the cache is middling then we promote more.
	 *
	 * This scheme reminds me of a graph of entropy vs probability of a
	 * binary variable.
	 */
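	/* hits / (hits + misses), scaled to 0..16, indexes this 17-entry table. */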
1029*4882a593Smuzhiyun static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun unsigned hits = mq->cache_stats.hits;
1032*4882a593Smuzhiyun unsigned misses = mq->cache_stats.misses;
1033*4882a593Smuzhiyun unsigned index = safe_div(hits << 4u, hits + misses);
1034*4882a593Smuzhiyun return table[index];
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun
update_promote_levels(struct smq_policy * mq)1037*4882a593Smuzhiyun static void update_promote_levels(struct smq_policy *mq)
1038*4882a593Smuzhiyun {
1039*4882a593Smuzhiyun /*
1040*4882a593Smuzhiyun * If there are unused cache entries then we want to be really
1041*4882a593Smuzhiyun * eager to promote.
1042*4882a593Smuzhiyun */
1043*4882a593Smuzhiyun unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
1044*4882a593Smuzhiyun default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun /*
1049*4882a593Smuzhiyun * If the hotspot queue is performing badly then we have little
1050*4882a593Smuzhiyun * confidence that we know which blocks to promote. So we cut down
1051*4882a593Smuzhiyun * the amount of promotions.
1052*4882a593Smuzhiyun */
1053*4882a593Smuzhiyun switch (stats_assess(&mq->hotspot_stats)) {
1054*4882a593Smuzhiyun case Q_POOR:
1055*4882a593Smuzhiyun threshold_level /= 4u;
1056*4882a593Smuzhiyun break;
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun case Q_FAIR:
1059*4882a593Smuzhiyun threshold_level /= 2u;
1060*4882a593Smuzhiyun break;
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun case Q_WELL:
1063*4882a593Smuzhiyun break;
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
1067*4882a593Smuzhiyun mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun /*
1071*4882a593Smuzhiyun * If the hotspot queue is performing badly, then we try and move entries
1072*4882a593Smuzhiyun * around more quickly.
1073*4882a593Smuzhiyun */
update_level_jump(struct smq_policy * mq)1074*4882a593Smuzhiyun static void update_level_jump(struct smq_policy *mq)
1075*4882a593Smuzhiyun {
1076*4882a593Smuzhiyun switch (stats_assess(&mq->hotspot_stats)) {
1077*4882a593Smuzhiyun case Q_POOR:
1078*4882a593Smuzhiyun mq->hotspot_level_jump = 4u;
1079*4882a593Smuzhiyun break;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun case Q_FAIR:
1082*4882a593Smuzhiyun mq->hotspot_level_jump = 2u;
1083*4882a593Smuzhiyun break;
1084*4882a593Smuzhiyun
1085*4882a593Smuzhiyun case Q_WELL:
1086*4882a593Smuzhiyun mq->hotspot_level_jump = 1u;
1087*4882a593Smuzhiyun break;
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun
end_hotspot_period(struct smq_policy * mq)1091*4882a593Smuzhiyun static void end_hotspot_period(struct smq_policy *mq)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1094*4882a593Smuzhiyun update_promote_levels(mq);
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun if (time_after(jiffies, mq->next_hotspot_period)) {
1097*4882a593Smuzhiyun update_level_jump(mq);
1098*4882a593Smuzhiyun q_redistribute(&mq->hotspot);
1099*4882a593Smuzhiyun stats_reset(&mq->hotspot_stats);
1100*4882a593Smuzhiyun mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
1101*4882a593Smuzhiyun }
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun
end_cache_period(struct smq_policy * mq)1104*4882a593Smuzhiyun static void end_cache_period(struct smq_policy *mq)
1105*4882a593Smuzhiyun {
1106*4882a593Smuzhiyun if (time_after(jiffies, mq->next_cache_period)) {
1107*4882a593Smuzhiyun clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun q_redistribute(&mq->dirty);
1110*4882a593Smuzhiyun q_redistribute(&mq->clean);
1111*4882a593Smuzhiyun stats_reset(&mq->cache_stats);
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun /*----------------------------------------------------------------*/
1118*4882a593Smuzhiyun
1119*4882a593Smuzhiyun /*
1120*4882a593Smuzhiyun * Targets are given as a percentage.
1121*4882a593Smuzhiyun */
1122*4882a593Smuzhiyun #define CLEAN_TARGET 25u
1123*4882a593Smuzhiyun #define FREE_TARGET 25u
1124*4882a593Smuzhiyun
percent_to_target(struct smq_policy * mq,unsigned p)1125*4882a593Smuzhiyun static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun return from_cblock(mq->cache_size) * p / 100u;
1128*4882a593Smuzhiyun }
1129*4882a593Smuzhiyun
clean_target_met(struct smq_policy * mq,bool idle)1130*4882a593Smuzhiyun static bool clean_target_met(struct smq_policy *mq, bool idle)
1131*4882a593Smuzhiyun {
1132*4882a593Smuzhiyun /*
1133*4882a593Smuzhiyun * Cache entries may not be populated. So we cannot rely on the
1134*4882a593Smuzhiyun * size of the clean queue.
1135*4882a593Smuzhiyun */
1136*4882a593Smuzhiyun if (idle) {
1137*4882a593Smuzhiyun /*
1138*4882a593Smuzhiyun * We'd like to clean everything.
1139*4882a593Smuzhiyun */
1140*4882a593Smuzhiyun return q_size(&mq->dirty) == 0u;
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyun /*
1144*4882a593Smuzhiyun * If we're busy we don't worry about cleaning at all.
1145*4882a593Smuzhiyun */
1146*4882a593Smuzhiyun return true;
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun
free_target_met(struct smq_policy * mq)1149*4882a593Smuzhiyun static bool free_target_met(struct smq_policy *mq)
1150*4882a593Smuzhiyun {
1151*4882a593Smuzhiyun unsigned nr_free;
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
1154*4882a593Smuzhiyun return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
1155*4882a593Smuzhiyun percent_to_target(mq, FREE_TARGET);
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun /*----------------------------------------------------------------*/
1159*4882a593Smuzhiyun
mark_pending(struct smq_policy * mq,struct entry * e)1160*4882a593Smuzhiyun static void mark_pending(struct smq_policy *mq, struct entry *e)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun BUG_ON(e->sentinel);
1163*4882a593Smuzhiyun BUG_ON(!e->allocated);
1164*4882a593Smuzhiyun BUG_ON(e->pending_work);
1165*4882a593Smuzhiyun e->pending_work = true;
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun
clear_pending(struct smq_policy * mq,struct entry * e)1168*4882a593Smuzhiyun static void clear_pending(struct smq_policy *mq, struct entry *e)
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun BUG_ON(!e->pending_work);
1171*4882a593Smuzhiyun e->pending_work = false;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
queue_writeback(struct smq_policy * mq,bool idle)1174*4882a593Smuzhiyun static void queue_writeback(struct smq_policy *mq, bool idle)
1175*4882a593Smuzhiyun {
1176*4882a593Smuzhiyun int r;
1177*4882a593Smuzhiyun struct policy_work work;
1178*4882a593Smuzhiyun struct entry *e;
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
1181*4882a593Smuzhiyun if (e) {
1182*4882a593Smuzhiyun mark_pending(mq, e);
1183*4882a593Smuzhiyun q_del(&mq->dirty, e);
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun work.op = POLICY_WRITEBACK;
1186*4882a593Smuzhiyun work.oblock = e->oblock;
1187*4882a593Smuzhiyun work.cblock = infer_cblock(mq, e);
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun r = btracker_queue(mq->bg_work, &work, NULL);
1190*4882a593Smuzhiyun if (r) {
1191*4882a593Smuzhiyun clear_pending(mq, e);
1192*4882a593Smuzhiyun q_push_front(&mq->dirty, e);
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun
queue_demotion(struct smq_policy * mq)1197*4882a593Smuzhiyun static void queue_demotion(struct smq_policy *mq)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun int r;
1200*4882a593Smuzhiyun struct policy_work work;
1201*4882a593Smuzhiyun struct entry *e;
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun if (WARN_ON_ONCE(!mq->migrations_allowed))
1204*4882a593Smuzhiyun return;
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
1207*4882a593Smuzhiyun if (!e) {
1208*4882a593Smuzhiyun if (!clean_target_met(mq, true))
1209*4882a593Smuzhiyun queue_writeback(mq, false);
1210*4882a593Smuzhiyun return;
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun mark_pending(mq, e);
1214*4882a593Smuzhiyun q_del(&mq->clean, e);
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun work.op = POLICY_DEMOTE;
1217*4882a593Smuzhiyun work.oblock = e->oblock;
1218*4882a593Smuzhiyun work.cblock = infer_cblock(mq, e);
1219*4882a593Smuzhiyun r = btracker_queue(mq->bg_work, &work, NULL);
1220*4882a593Smuzhiyun if (r) {
1221*4882a593Smuzhiyun clear_pending(mq, e);
1222*4882a593Smuzhiyun q_push_front(&mq->clean, e);
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun }

static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
			    struct policy_work **workp)
{
	int r;
	struct entry *e;
	struct policy_work work;

	if (!mq->migrations_allowed)
		return;

	if (allocator_empty(&mq->cache_alloc)) {
		/*
		 * We always claim to be 'idle' to ensure some demotions happen
		 * with continuous loads.
		 */
		if (!free_target_met(mq))
			queue_demotion(mq);
		return;
	}

	if (btracker_promotion_already_present(mq->bg_work, oblock))
		return;

	/*
	 * We allocate the entry now to reserve the cblock.  If the
	 * background work is aborted we must remember to free it.
	 */
	e = alloc_entry(&mq->cache_alloc);
	BUG_ON(!e);
	e->pending_work = true;
	work.op = POLICY_PROMOTE;
	work.oblock = oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, workp);
	if (r)
		free_entry(&mq->cache_alloc, e);
}
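
/*
 * Promotion is the one case where no cache entry exists yet, so
 * alloc_entry() is used to reserve the destination cblock before the
 * work is queued, and the entry is freed again on failure.  Passing a
 * non-NULL workp hands the queued work straight back to the caller
 * (used by the lookup_with_work path below), avoiding a separate
 * get_background_work() round trip -- a reading of the code, not a
 * documented contract.
 */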

/*----------------------------------------------------------------*/

enum promote_result {
	PROMOTE_NOT,
	PROMOTE_TEMPORARY,
	PROMOTE_PERMANENT
};

/*
 * Converts a boolean into a promote result.
 */
static enum promote_result maybe_promote(bool promote)
{
	return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
}

static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
					  int data_dir, bool fast_promote)
{
	if (data_dir == WRITE) {
		if (!allocator_empty(&mq->cache_alloc) && fast_promote)
			return PROMOTE_TEMPORARY;

		return maybe_promote(hs_e->level >= mq->write_promote_level);
	}

	return maybe_promote(hs_e->level >= mq->read_promote_level);
}
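
/*
 * Worked example with hypothetical thresholds: if read_promote_level
 * is 4 and write_promote_level is 6, a hotspot entry at level 5
 * promotes on reads (5 >= 4) but not on writes (5 < 6), unless
 * fast_promote is set and a free cblock exists, in which case a write
 * gets PROMOTE_TEMPORARY regardless of level.
 */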

static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
{
	sector_t r = from_oblock(b);
	(void) sector_div(r, mq->cache_blocks_per_hotspot_block);
	return to_oblock(r);
}
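
/*
 * Example: with cache_blocks_per_hotspot_block = 16, origin blocks
 * 0..15 map to hotspot block 0, blocks 16..31 to hotspot block 1, and
 * so on.  sector_div() divides r in place and returns the remainder,
 * which is deliberately discarded here.
 */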

static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
	unsigned hi;
	dm_oblock_t hb = to_hblock(mq, b);
	struct entry *e = h_lookup(&mq->hotspot_table, hb);

	if (e) {
		stats_level_accessed(&mq->hotspot_stats, e->level);

		hi = get_index(&mq->hotspot_alloc, e);
		q_requeue(&mq->hotspot, e,
			  test_and_set_bit(hi, mq->hotspot_hit_bits) ?
			  0u : mq->hotspot_level_jump,
			  NULL, NULL);

	} else {
		stats_miss(&mq->hotspot_stats);

		e = alloc_entry(&mq->hotspot_alloc);
		if (!e) {
			e = q_pop(&mq->hotspot);
			if (e) {
				h_remove(&mq->hotspot_table, e);
				hi = get_index(&mq->hotspot_alloc, e);
				clear_bit(hi, mq->hotspot_hit_bits);
			}
		}

		if (e) {
			e->oblock = hb;
			q_push(&mq->hotspot, e);
			h_insert(&mq->hotspot_table, e);
		}
	}

	return e;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct smq_policy, policy);
}

static void smq_destroy(struct dm_cache_policy *p)
{
	struct smq_policy *mq = to_smq_policy(p);

	btracker_destroy(mq->bg_work);
	h_exit(&mq->hotspot_table);
	h_exit(&mq->table);
	free_bitset(mq->hotspot_hit_bits);
	free_bitset(mq->cache_hit_bits);
	space_exit(&mq->es);
	kfree(mq);
}

/*----------------------------------------------------------------*/

static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
		    int data_dir, bool fast_copy,
		    struct policy_work **work, bool *background_work)
{
	struct entry *e, *hs_e;
	enum promote_result pr;

	*background_work = false;

	e = h_lookup(&mq->table, oblock);
	if (e) {
		stats_level_accessed(&mq->cache_stats, e->level);

		requeue(mq, e);
		*cblock = infer_cblock(mq, e);
		return 0;
	}

	stats_miss(&mq->cache_stats);

	/*
	 * The hotspot queue only gets updated with misses.
	 */
	hs_e = update_hotspot_queue(mq, oblock);

	pr = should_promote(mq, hs_e, data_dir, fast_copy);
	if (pr != PROMOTE_NOT) {
		queue_promotion(mq, oblock, work);
		*background_work = true;
	}

	return -ENOENT;
}
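
/*
 * Summary of the lookup flow: a hit requeues the entry and returns its
 * cblock with 0; a miss feeds the hotspot queue and, if the hotspot
 * level clears the promotion threshold, queues a promotion and returns
 * -ENOENT with *background_work set so the caller knows a migration is
 * in flight.
 */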

static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy,
		      bool *background_work)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock,
		     data_dir, fast_copy,
		     NULL, background_work);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

static int smq_lookup_with_work(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work)
{
	int r;
	bool background_queued;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = btracker_issue(mq->bg_work, result);
	if (r == -ENODATA) {
		if (!clean_target_met(mq, idle)) {
			queue_writeback(mq, idle);
			r = btracker_issue(mq->bg_work, result);
		}
	}
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

/*
 * We need to clear any pending work flags that have been set, and in the
 * case of promotion free the entry for the destination cblock.
 *
 * The "h, q, a" annotations below track the entry's state on either
 * side of each step: whether it is in the hash table (h), on a
 * clean/dirty queue (q) and allocated (a); a leading '!' negates the
 * flag.
 */
static void __complete_background_work(struct smq_policy *mq,
				       struct policy_work *work,
				       bool success)
{
	struct entry *e = get_entry(&mq->cache_alloc,
				    from_cblock(work->cblock));

	switch (work->op) {
	case POLICY_PROMOTE:
		// !h, !q, a
		clear_pending(mq, e);
		if (success) {
			e->oblock = work->oblock;
			e->level = NR_CACHE_LEVELS - 1;
			push(mq, e);
			// h, q, a
		} else {
			free_entry(&mq->cache_alloc, e);
			// !h, !q, !a
		}
		break;

	case POLICY_DEMOTE:
		// h, !q, a
		if (success) {
			h_remove(&mq->table, e);
			free_entry(&mq->cache_alloc, e);
			// !h, !q, !a
		} else {
			clear_pending(mq, e);
			push_queue(mq, e);
			// h, q, a
		}
		break;

	case POLICY_WRITEBACK:
		// h, !q, a
		clear_pending(mq, e);
		push_queue(mq, e);
		// h, q, a
		break;
	}

	btracker_complete(mq->bg_work, work);
}

static void smq_complete_background_work(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__complete_background_work(mq, work, success);
	spin_unlock_irqrestore(&mq->lock, flags);
}

/*
 * Pre and post condition: in_hash(oblock) -> in_hash(oblock), i.e. the
 * entry stays in the hash table throughout.
 */
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
{
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (e->pending_work) {
		e->dirty = set;
	} else {
		del_queue(mq, e);
		e->dirty = set;
		push_queue(mq, e);
	}
}
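
/*
 * Entries with pending background work are intentionally left off the
 * clean/dirty queues here: they were removed when the work was queued,
 * so only the dirty flag is updated and the requeue happens later in
 * __complete_background_work() via push_queue().
 */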

static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, true);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, false);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static unsigned random_level(dm_cblock_t cblock)
{
	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
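
/*
 * Example: hash_32() spreads cblocks pseudo-randomly, so mappings
 * loaded without a valid hint land on an evenly distributed level in
 * [0, NR_CACHE_LEVELS - 1] rather than all starting at level 0.  The
 * '&' mask assumes NR_CACHE_LEVELS is a power of two.
 */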

static int smq_load_mapping(struct dm_cache_policy *p,
			    dm_oblock_t oblock, dm_cblock_t cblock,
			    bool dirty, uint32_t hint, bool hint_valid)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
	e->oblock = oblock;
	e->dirty = dirty;
	e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
	e->pending_work = false;

	/*
	 * When we load mappings we push ahead of both sentinels in order to
	 * allow demotions and cleaning to occur immediately.
	 */
	push_front(mq, e);

	return 0;
}

static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (!e->allocated)
		return -ENODATA;

	// FIXME: what if this block has pending background work?
	del_queue(mq, e);
	h_remove(&mq->table, e);
	free_entry(&mq->cache_alloc, e);
	return 0;
}

static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (!e->allocated)
		return 0;

	return e->level;
}

static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

static void smq_tick(struct dm_cache_policy *p, bool can_block)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	mq->tick++;
	update_sentinels(mq);
	end_hotspot_period(mq);
	end_cache_period(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
	struct smq_policy *mq = to_smq_policy(p);

	mq->migrations_allowed = allow;
}

/*
 * smq has no config values, but the old mq policy did.  To avoid breaking
 * software we continue to accept these configurables for the mq policy,
 * but they have no effect.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold") ||
	    !strcasecmp(key, "sequential_threshold") ||
	    !strcasecmp(key, "discard_promote_adjustment") ||
	    !strcasecmp(key, "read_promote_adjustment") ||
	    !strcasecmp(key, "write_promote_adjustment")) {
		DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
		return 0;
	}

	return -EINVAL;
}

static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;

	DMEMIT("10 random_threshold 0 "
	       "sequential_threshold 0 "
	       "discard_promote_adjustment 0 "
	       "read_promote_adjustment 0 "
	       "write_promote_adjustment 0 ");

	*sz_ptr = sz;
	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
	mq->policy.destroy = smq_destroy;
	mq->policy.lookup = smq_lookup;
	mq->policy.lookup_with_work = smq_lookup_with_work;
	mq->policy.get_background_work = smq_get_background_work;
	mq->policy.complete_background_work = smq_complete_background_work;
	mq->policy.set_dirty = smq_set_dirty;
	mq->policy.clear_dirty = smq_clear_dirty;
	mq->policy.load_mapping = smq_load_mapping;
	mq->policy.invalidate_mapping = smq_invalidate_mapping;
	mq->policy.get_hint = smq_get_hint;
	mq->policy.residency = smq_residency;
	mq->policy.tick = smq_tick;
	mq->policy.allow_migrations = smq_allow_migrations;

	if (mimic_mq) {
		mq->policy.set_config_value = mq_set_config_value;
		mq->policy.emit_config_values = mq_emit_config_values;
	}
}

static bool too_many_hotspot_blocks(sector_t origin_size,
				    sector_t hotspot_block_size,
				    unsigned nr_hotspot_blocks)
{
	return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}

static void calc_hotspot_params(sector_t origin_size,
				sector_t cache_block_size,
				unsigned nr_cache_blocks,
				sector_t *hotspot_block_size,
				unsigned *nr_hotspot_blocks)
{
	*hotspot_block_size = cache_block_size * 16u;
	*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);

	while ((*hotspot_block_size > cache_block_size) &&
	       too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
		*hotspot_block_size /= 2u;
}
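
/*
 * Worked example with hypothetical geometry: a cache_block_size of 128
 * sectors (64KiB) gives an initial hotspot block of 2048 sectors
 * (1MiB).  If nr_hotspot_blocks * 1MiB would overrun origin_size, the
 * hotspot block size halves repeatedly (1MiB -> 512KiB -> ...) until
 * the hotspot region fits or the size reaches cache_block_size.
 */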

static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
					    sector_t origin_size,
					    sector_t cache_block_size,
					    bool mimic_mq,
					    bool migrations_allowed)
{
	unsigned i;
	unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
	unsigned total_sentinels = 2u * nr_sentinels_per_queue;
	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq, mimic_mq);
	mq->cache_size = cache_size;
	mq->cache_block_size = cache_block_size;

	calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
			    &mq->hotspot_block_size, &mq->nr_hotspot_blocks);

	mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
	mq->hotspot_level_jump = 1u;
	if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
		DMERR("couldn't initialize entry space");
		goto bad_pool_init;
	}

	init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
		       total_sentinels + mq->nr_hotspot_blocks);

	init_allocator(&mq->cache_alloc, &mq->es,
		       total_sentinels + mq->nr_hotspot_blocks,
		       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));

	mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
	if (!mq->hotspot_hit_bits) {
		DMERR("couldn't allocate hotspot hit bitset");
		goto bad_hotspot_hit_bits;
	}
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);

	if (from_cblock(cache_size)) {
		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
		if (!mq->cache_hit_bits) {
			DMERR("couldn't allocate cache hit bitset");
			goto bad_cache_hit_bits;
		}
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
	} else {
		mq->cache_hit_bits = NULL;
	}

	mq->tick = 0;
	spin_lock_init(&mq->lock);

	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
	mq->hotspot.nr_top_levels = 8;
	mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
					   from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);

	q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
	q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);

	stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
	stats_init(&mq->cache_stats, NR_CACHE_LEVELS);

	if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
		goto bad_alloc_table;

	if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
		goto bad_alloc_hotspot_table;

	sentinels_init(mq);
	mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;

	mq->next_hotspot_period = jiffies;
	mq->next_cache_period = jiffies;

	mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
	if (!mq->bg_work)
		goto bad_btracker;

	mq->migrations_allowed = migrations_allowed;

	return &mq->policy;

bad_btracker:
	h_exit(&mq->hotspot_table);
bad_alloc_hotspot_table:
	h_exit(&mq->table);
bad_alloc_table:
	free_bitset(mq->cache_hit_bits);
bad_cache_hit_bits:
	free_bitset(mq->hotspot_hit_bits);
bad_hotspot_hit_bits:
	space_exit(&mq->es);
bad_pool_init:
	kfree(mq);

	return NULL;
}

static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
}

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
}
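
/*
 * The three creation entry points differ only in the flags passed to
 * __smq_create():
 *
 *	smq_create:     mimic_mq = false, migrations_allowed = true
 *	mq_create:      mimic_mq = true,  migrations_allowed = true
 *	cleaner_create: mimic_mq = false, migrations_allowed = false
 *
 * i.e. "mq" is smq plus the legacy config shims, and "cleaner" is smq
 * with promotions/demotions disabled so only writeback work is issued.
 */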

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type smq_policy_type = {
	.name = "smq",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = smq_create
};

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create,
};

static struct dm_cache_policy_type cleaner_policy_type = {
	.name = "cleaner",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = cleaner_create,
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = smq_create,
	.real = &smq_policy_type
};

static int __init smq_init(void)
{
	int r;

	r = dm_cache_policy_register(&smq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		return -ENOMEM;
	}

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed (as mq) %d", r);
		goto out_mq;
	}

	r = dm_cache_policy_register(&cleaner_policy_type);
	if (r) {
		DMERR("register failed (as cleaner) %d", r);
		goto out_cleaner;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (r) {
		DMERR("register failed (as default) %d", r);
		goto out_default;
	}

	return 0;

out_default:
	dm_cache_policy_unregister(&cleaner_policy_type);
out_cleaner:
	dm_cache_policy_unregister(&mq_policy_type);
out_mq:
	dm_cache_policy_unregister(&smq_policy_type);

	return -ENOMEM;
}

static void __exit smq_exit(void)
{
	dm_cache_policy_unregister(&cleaner_policy_type);
	dm_cache_policy_unregister(&smq_policy_type);
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);
}

module_init(smq_init);
module_exit(smq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");

MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
MODULE_ALIAS("dm-cache-cleaner");