xref: /OK3568_Linux_fs/kernel/block/blk-timeout.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Functions related to generic timeout handling of requests.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #include <linux/kernel.h>
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/blkdev.h>
8*4882a593Smuzhiyun #include <linux/fault-inject.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include "blk.h"
11*4882a593Smuzhiyun #include "blk-mq.h"
12*4882a593Smuzhiyun 
#ifdef CONFIG_FAIL_IO_TIMEOUT

/* Fault-injection attributes controlling fake request timeouts. */
static DECLARE_FAULT_ATTR(fail_io_timeout);

/*
 * Parse the "fail_io_timeout=" kernel boot parameter into the
 * fault-injection attributes (interval,probability,space,times).
 */
static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
22*4882a593Smuzhiyun 
/*
 * Decide whether the current completion should be dropped so the request
 * appears to time out, based on the fail_io_timeout fault attributes.
 * The @q argument is unused here; the decision is purely per-call via
 * should_fail().
 */
bool __blk_should_fake_timeout(struct request_queue *q)
{
	return should_fail(&fail_io_timeout, 1);
}
EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
28*4882a593Smuzhiyun 
fail_io_timeout_debugfs(void)29*4882a593Smuzhiyun static int __init fail_io_timeout_debugfs(void)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun 	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
32*4882a593Smuzhiyun 						NULL, &fail_io_timeout);
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(dir);
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun late_initcall(fail_io_timeout_debugfs);
38*4882a593Smuzhiyun 
part_timeout_show(struct device * dev,struct device_attribute * attr,char * buf)39*4882a593Smuzhiyun ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
40*4882a593Smuzhiyun 			  char *buf)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	struct gendisk *disk = dev_to_disk(dev);
43*4882a593Smuzhiyun 	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", set != 0);
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun 
part_timeout_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)48*4882a593Smuzhiyun ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
49*4882a593Smuzhiyun 			   const char *buf, size_t count)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	struct gendisk *disk = dev_to_disk(dev);
52*4882a593Smuzhiyun 	int val;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	if (count) {
55*4882a593Smuzhiyun 		struct request_queue *q = disk->queue;
56*4882a593Smuzhiyun 		char *p = (char *) buf;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 		val = simple_strtoul(p, &p, 10);
59*4882a593Smuzhiyun 		if (val)
60*4882a593Smuzhiyun 			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
61*4882a593Smuzhiyun 		else
62*4882a593Smuzhiyun 			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
63*4882a593Smuzhiyun 	}
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	return count;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun #endif /* CONFIG_FAIL_IO_TIMEOUT */
69*4882a593Smuzhiyun 
/**
 * blk_abort_request - Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request.
 */
void blk_abort_request(struct request *req)
{
	/*
	 * All we need to ensure is that timeout scan takes place
	 * immediately and that scan sees the new timeout value.
	 * No need for fancy synchronizations.
	 */
	/*
	 * Backdate the deadline to "now" so the next timeout scan treats
	 * @req as already expired.  WRITE_ONCE pairs with lockless readers
	 * of ->deadline in the timeout path — presumably via READ_ONCE in
	 * blk-mq's scan; confirm against blk-mq.c.
	 */
	WRITE_ONCE(req->deadline, jiffies);
	/* Kick the queue's timeout worker to run the scan immediately. */
	kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
90*4882a593Smuzhiyun 
/*
 * All-ones mask one below the smallest power of two >= HZ; consumed by
 * blk_round_jiffies() below.
 */
static unsigned long blk_timeout_mask __read_mostly;

/* Compute blk_timeout_mask once, after HZ-dependent setup is complete. */
static int __init blk_timeout_init(void)
{
	blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
	return 0;
}

late_initcall(blk_timeout_init);
100*4882a593Smuzhiyun 
/*
 * Just a rough estimate, we don't care about specific values for timeouts.
 *
 * Adds (blk_timeout_mask + 1) jiffies to @j, i.e. the power of two >= HZ,
 * so the result lands roughly one second later.  Cheap substitute for a
 * precise round_jiffies_up(), good enough for grouping timeout timers.
 */
static inline unsigned long blk_round_jiffies(unsigned long j)
{
	return (j + blk_timeout_mask) + 1;
}
108*4882a593Smuzhiyun 
blk_rq_timeout(unsigned long timeout)109*4882a593Smuzhiyun unsigned long blk_rq_timeout(unsigned long timeout)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun 	unsigned long maxt;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
114*4882a593Smuzhiyun 	if (time_after(timeout, maxt))
115*4882a593Smuzhiyun 		timeout = maxt;
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	return timeout;
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	/* Clear any stale timed-out marker from a previous dispatch. */
	req->rq_flags &= ~RQF_TIMED_OUT;

	/* Publish the per-request deadline for lockless timeout scanning. */
	expiry = jiffies + req->timeout;
	WRITE_ONCE(req->deadline, expiry);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = blk_rq_timeout(blk_round_jiffies(expiry));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		/*
		 * NOTE(review): when the timer is not pending, ->expires
		 * (and hence diff) is stale — harmless, because in that
		 * case the first clause of the inner condition forces
		 * mod_timer() regardless of diff.
		 */
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to added timer slack to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}

}
168