// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011	Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011	Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference. The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered. Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
 * completes, all the requests which were pending proceed to the next
 * step. This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress. This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete. The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/lockdep.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

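/*
 * Illustration of the rules above: a write submitted with
 * REQ_PREFLUSH | REQ_FUA and data decomposes as follows, depending on the
 * capabilities the queue advertises:
 *
 *	no writeback cache (no QUEUE_FLAG_WC):	DATA only
 *	writeback cache + FUA support:		PREFLUSH, then DATA carrying REQ_FUA
 *	writeback cache, no FUA support:	PREFLUSH, DATA, then POSTFLUSH
 *
 * blk_flush_policy() computes exactly this step mask for a given request.
 */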
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

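/*
 * Next step of @rq's flush sequence: the lowest REQ_FSEQ_* bit not yet set
 * in rq->flush.seq. blk_insert_flush() seeds ->flush.seq with the steps
 * that are *not* required (REQ_FSEQ_ACTIONS & ~policy), so ffz() only walks
 * the needed steps, in PREFLUSH -> DATA -> POSTFLUSH -> DONE order.
 */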
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again. @rq->biotail is guaranteed to equal the
	 * original @rq->bio. Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

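/*
 * (Re)queue @rq for dispatch through the requeue list. @add_front queues it
 * at the head so the flush sequence makes progress ahead of other requeued
 * requests; the requeue work is kicked so the request is dispatched soon.
 */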
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

static void blk_account_io_flush(struct request *rq)
{
	struct hd_struct *part = &rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path. Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * The flush request has to be marked as IDLE when it is really ended
	 * because its .end_io() is also called from the timeout code path,
	 * to avoid a use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

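/*
 * Tell the fq-owned flush request apart from normal tagged requests; it is
 * the only request whose ->end_io is flush_end_io (see the note in
 * flush_end_io() about the timeout path also calling .end_io()).
 */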
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 *
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx. This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * With the none scheduler, borrow the tag from the first request
	 * since they can't be in flight at the same time, and acquire the
	 * tag's ownership for the flush req.
	 *
	 * With an IO scheduler, the flush rq needs to borrow the scheduler
	 * tag just to satisfy the put/get driver tag pairing.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow the data request's driver tag, so we have to
		 * mark this flush request as INFLIGHT to avoid double
		 * accounting of this driver tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
	 * implied in refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
	 * and READ flush_rq->end_io.
	 */
	smp_wmb();
	refcount_set(&flush_rq->ref, 1);

	blk_flush_queue_rq(flush_rq, false);
}

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted. Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done. Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache. In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery. Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through flush machinery. Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
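
/*
 * Typical caller pattern (for illustration): a caller that has already
 * written out its data can force the device's volatile cache to media with
 *
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL);
 *
 * which blocks until the empty PREFLUSH bio completes and returns 0 or a
 * negative errno from submit_bio_wait().
 */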

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	lockdep_register_key(&fq->key);
	lockdep_set_class(&fq->mq_flush_lock, &fq->key);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	lockdep_unregister_key(&fq->key);
	kfree(fq->flush_rq);
	kfree(fq);
}