// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};
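
/*
 * Worked example (illustrative numbers, not taken from a specific device): if
 * the scheduler tags use 64 bits per sbitmap word (a shift of 6), the per-word
 * async depth computed in kyber_queue_data_alloc() below is
 * 64 * 75 / 100 = 48, leaving 16 slots of each word effectively reserved for
 * synchronous requests.
 */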

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
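
/*
 * Worked example (illustrative, using the default 2 ms read target): with
 * KYBER_LATENCY_SHIFT = 2 the bucket width is 2 ms / 4 = 500 us, buckets 0-3
 * cover latencies up to the target ("good"), and buckets 4-7 cover latencies
 * above it ("bad"). A read completing in 1.2 ms therefore lands in bucket 2
 * (<= 3/4 of the target); see add_latency_sample() below.
 */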

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * The same mapping exists between ctx & hctx and between kcq & khd; we use
 * request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure that operations on rq_list and kcq_map are atomic.
	 * Also protects the rqs on rq_list during merging.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(unsigned int op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}

static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}
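
/*
 * Worked example (illustrative bucket counts, not from a real trace): with 600
 * samples and percentile = 90, percentile_samples = DIV_ROUND_UP(600 * 90, 100)
 * = 540. Walking buckets holding {100, 200, 250, 30, 10, 5, 3, 2} samples, the
 * loop above subtracts 100 and then 200, and stops at bucket 2 because
 * 250 >= 240, so the p90 is reported as bucket 2 (a "good" bucket).
 */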

static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
				   depth);
	}
}

static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}
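
/*
 * Worked example of the depth rescaling above (illustrative numbers): if the
 * p99 falls in bucket 5 (<= 1 1/2 * target), the new depth is
 * orig_depth * 6 >> 2, i.e. 1.5x the current depth, so throttling is eased; if
 * it falls in bucket 2 (<= 3/4 * target) while another domain is congested,
 * the new depth is orig_depth * 3 >> 2, i.e. 3/4 of the current depth. Either
 * result is then clamped to [1, kyber_depth[domain]] by kyber_resize_domain().
 */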

static unsigned int kyber_sched_tags_shift(struct request_queue *q)
{
	/*
	 * All of the hardware queues have the same depth, so we can just grab
	 * the shift of the first one.
	 */
	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	unsigned int shift;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	shift = kyber_sched_tags_shift(q);
	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
					kqd->async_depth);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}

static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
			    unsigned int nr_segs)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		blk_mq_sched_request_inserted(rq);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the wait
		 * queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR
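
/*
 * Usage sketch (hedged; the exact sysfs path depends on the device name):
 * these attributes surface as per-queue iosched tunables, so the read latency
 * target can be changed at runtime with something like
 *
 *	echo 10000000 > /sys/block/<dev>/queue/iosched/read_lat_nsec
 *
 * which requests a 10 ms read latency target for <dev>.
 */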

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");