// SPDX-License-Identifier: GPL-2.0-only
/*
 * scsi_pm.c	Copyright (C) 2010 Alan Stern
 *
 * SCSI dynamic Power Management
 *	Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
#include <linux/blk-pm.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

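/*
 * Thin wrappers around the driver's dev_pm_ops callbacks: invoke the
 * matching callback when the driver provides one, otherwise report success.
 */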
static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->restore ? pm->restore(dev) : 0;
}

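/*
 * Quiesce the device and then run the given suspend-type callback.  If the
 * callback fails, the device is resumed again so it is left usable.
 */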
static int scsi_dev_type_suspend(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err;

        /* flush pending in-flight resume operations, suspend is synchronous */
        async_synchronize_full_domain(&scsi_sd_pm_domain);

        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
                err = cb(dev, pm);
                if (err)
                        scsi_device_resume(to_scsi_device(dev));
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
}

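/*
 * Run the given resume-type callback and unquiesce the device.  On success,
 * the runtime PM status of the device (and, for sdevs, its request queue)
 * is forced back to "active".
 */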
static int scsi_dev_type_resume(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        err = cb(dev, pm);
        scsi_device_resume(to_scsi_device(dev));
        dev_dbg(dev, "scsi resume: %d\n", err);

        if (err == 0) {
                pm_runtime_disable(dev);
                err = pm_runtime_set_active(dev);
                pm_runtime_enable(dev);

                /*
                 * Forcibly set runtime PM status of request queue to "active"
                 * to make sure we can again get requests from the queue
                 * (see also blk_pm_peek_request()).
                 *
                 * The resume hook will correct runtime PM status of the disk.
                 */
                if (!err && scsi_is_sdev_device(dev)) {
                        struct scsi_device *sdev = to_scsi_device(dev);

                        blk_set_runtime_active(sdev->request_queue);
                }
        }

        return err;
}

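/*
 * Common suspend-side path for system suspend, freeze and poweroff.  Only
 * sdev (LUN-level) devices need any work here; other device types return
 * success immediately.
 */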
static int
scsi_bus_suspend_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        int err = 0;

        if (scsi_is_sdev_device(dev)) {
                /*
                 * All the high-level SCSI drivers that implement runtime
                 * PM treat runtime suspend, system suspend, and system
                 * hibernate nearly identically. In all cases the requirements
                 * for runtime suspension are stricter.
                 */
                if (pm_runtime_suspended(dev))
                        return 0;

                err = scsi_dev_type_suspend(dev, cb);
        }

        return err;
}

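/*
 * Resume-side callbacks, run asynchronously in scsi_sd_pm_domain so that
 * several devices can resume in parallel.
 */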
static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_restore);
}

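/*
 * Common resume-side path for system resume, thaw and restore.  sdev resume
 * work is scheduled in scsi_sd_pm_domain; other device types only have
 * their runtime PM status reset to "active".
 */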
static int scsi_bus_resume_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        async_func_t fn;

        if (!scsi_is_sdev_device(dev))
                fn = NULL;
        else if (cb == do_scsi_resume)
                fn = async_sdev_resume;
        else if (cb == do_scsi_thaw)
                fn = async_sdev_thaw;
        else if (cb == do_scsi_restore)
                fn = async_sdev_restore;
        else
                fn = NULL;

        if (fn) {
                async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

                /*
                 * If a user has disabled async probing a likely reason
                 * is due to a storage enclosure that does not inject
                 * staggered spin-ups. For safety, make resume
                 * synchronous as well in that case.
                 */
                if (strncmp(scsi_scan_type, "async", 5) != 0)
                        async_synchronize_full_domain(&scsi_sd_pm_domain);
        } else {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }
        return 0;
}

static int scsi_bus_prepare(struct device *dev)
{
        if (scsi_is_host_device(dev)) {
                /* Wait until async scanning is finished */
                scsi_complete_async_scans();
        }
        return 0;
}

static int scsi_bus_suspend(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare        NULL
#define scsi_bus_suspend        NULL
#define scsi_bus_resume         NULL
#define scsi_bus_freeze         NULL
#define scsi_bus_thaw           NULL
#define scsi_bus_poweroff       NULL
#define scsi_bus_restore        NULL

#endif /* CONFIG_PM_SLEEP */

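/*
 * Runtime-suspend an sdev: give the block layer a chance to refuse the
 * transition, then call the driver's runtime_suspend callback and report
 * the result back to the block layer.
 */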
static int sdev_runtime_suspend(struct device *dev)
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
        int err = 0;

        err = blk_pre_runtime_suspend(sdev->request_queue);
        if (err)
                return err;
        if (pm && pm->runtime_suspend)
                err = pm->runtime_suspend(dev);
        blk_post_runtime_suspend(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_suspend\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_suspend(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

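/*
 * Runtime-resume an sdev: notify the block layer before and after the
 * driver's runtime_resume callback so the request queue can start serving
 * requests again.
 */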
static int sdev_runtime_resume(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        blk_pre_runtime_resume(sdev->request_queue);
        if (pm && pm->runtime_resume)
                err = pm->runtime_resume(dev);
        blk_post_runtime_resume(sdev->request_queue);

        return err;
}

static int scsi_runtime_resume(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_resume(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

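/*
 * Runtime-idle: for sdevs, convert the idle notification into a delayed
 * autosuspend and return -EBUSY so the PM core does not also try to
 * suspend the device immediately.
 */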
static int scsi_runtime_idle(struct device *dev)
{
        dev_dbg(dev, "scsi_runtime_idle\n");

        /* Insert hooks here for targets, hosts, and transport classes */

        if (scsi_is_sdev_device(dev)) {
                pm_runtime_mark_last_busy(dev);
                pm_runtime_autosuspend(dev);
                return -EBUSY;
        }

        return 0;
}

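/*
 * scsi_autopm_get_device - take a runtime PM reference and resume the device.
 *
 * -EACCES from pm_runtime_get_sync() (runtime PM disabled by the user via
 * sysfs) is treated as success.  A minimal, illustrative caller pattern
 * (do_io() is a hypothetical helper, not part of this file):
 *
 *	if (scsi_autopm_get_device(sdev) == 0) {
 *		do_io(sdev);
 *		scsi_autopm_put_device(sdev);
 *	}
 */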
int scsi_autopm_get_device(struct scsi_device *sdev)
{
        int err;

        err = pm_runtime_get_sync(&sdev->sdev_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&sdev->sdev_gendev);
        else
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

void scsi_autopm_put_device(struct scsi_device *sdev)
{
        pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);

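/*
 * Target-level counterparts (not exported): take or drop a runtime PM
 * reference on the scsi_target device.
 */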
void scsi_autopm_get_target(struct scsi_target *starget)
{
        pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
        pm_runtime_put_sync(&starget->dev);
}

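/*
 * Host-level counterparts.  As with scsi_autopm_get_device(), -EACCES from
 * pm_runtime_get_sync() is treated as success.
 */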
int scsi_autopm_get_host(struct Scsi_Host *shost)
{
        int err;

        err = pm_runtime_get_sync(&shost->shost_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&shost->shost_gendev);
        else
                err = 0;
        return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
        pm_runtime_put_sync(&shost->shost_gendev);
}

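/* dev_pm_ops for the SCSI bus type: system-sleep and runtime PM callbacks */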
const struct dev_pm_ops scsi_bus_pm_ops = {
        .prepare =              scsi_bus_prepare,
        .suspend =              scsi_bus_suspend,
        .resume =               scsi_bus_resume,
        .freeze =               scsi_bus_freeze,
        .thaw =                 scsi_bus_thaw,
        .poweroff =             scsi_bus_poweroff,
        .restore =              scsi_bus_restore,
        .runtime_suspend =      scsi_runtime_suspend,
        .runtime_resume =       scsi_runtime_resume,
        .runtime_idle =         scsi_runtime_idle,
};