1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * hosts.c Copyright (C) 1992 Drew Eckhardt
4*4882a593Smuzhiyun * Copyright (C) 1993, 1994, 1995 Eric Youngdale
5*4882a593Smuzhiyun * Copyright (C) 2002-2003 Christoph Hellwig
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * mid to lowlevel SCSI driver interface
8*4882a593Smuzhiyun * Initial versions: Drew Eckhardt
9*4882a593Smuzhiyun * Subsequent revisions: Eric Youngdale
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * <drew@colorado.edu>
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
14*4882a593Smuzhiyun * Added QLOGIC QLA1280 SCSI controller kernel host support.
15*4882a593Smuzhiyun * August 4, 1999 Fred Lewis, Intel DuPont
16*4882a593Smuzhiyun *
17*4882a593Smuzhiyun * Updated to reflect the new initialization scheme for the higher
18*4882a593Smuzhiyun * level of scsi drivers (sd/sr/st)
19*4882a593Smuzhiyun * September 17, 2000 Torben Mathiasen <tmm@image.dk>
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun * Restructured scsi_host lists and associated functions.
22*4882a593Smuzhiyun * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
23*4882a593Smuzhiyun */
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include <linux/module.h>
26*4882a593Smuzhiyun #include <linux/blkdev.h>
27*4882a593Smuzhiyun #include <linux/kernel.h>
28*4882a593Smuzhiyun #include <linux/slab.h>
29*4882a593Smuzhiyun #include <linux/kthread.h>
30*4882a593Smuzhiyun #include <linux/string.h>
31*4882a593Smuzhiyun #include <linux/mm.h>
32*4882a593Smuzhiyun #include <linux/init.h>
33*4882a593Smuzhiyun #include <linux/completion.h>
34*4882a593Smuzhiyun #include <linux/transport_class.h>
35*4882a593Smuzhiyun #include <linux/platform_device.h>
36*4882a593Smuzhiyun #include <linux/pm_runtime.h>
37*4882a593Smuzhiyun #include <linux/idr.h>
38*4882a593Smuzhiyun #include <scsi/scsi_device.h>
39*4882a593Smuzhiyun #include <scsi/scsi_host.h>
40*4882a593Smuzhiyun #include <scsi/scsi_transport.h>
41*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun #include "scsi_priv.h"
44*4882a593Smuzhiyun #include "scsi_logging.h"
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun
/*
 * Module-wide default for the SCSI error-handler deadline, in seconds.
 * -1 (the default) means "no deadline"; scsi_host_alloc() converts this
 * to jiffies per host (clamped to INT_MAX).  Writable at runtime via
 * /sys/module parameters (S_IRUGO|S_IWUSR).
 */
static int shost_eh_deadline = -1;

module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
		 "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");

/* Allocator for unique host numbers (shost->host_no). */
static DEFINE_IDA(host_index_ida);
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun
/*
 * Class-device release callback for shost_dev: drop the reference held
 * on the paired shost_gendev so the host can be freed once both devices
 * are gone.
 */
static void scsi_host_cls_release(struct device *dev)
{
	put_device(&class_to_shost(dev)->shost_gendev);
}
60*4882a593Smuzhiyun
/* Backing class for /sys/class/scsi_host entries. */
static struct class shost_class = {
	.name		= "scsi_host",
	.dev_release	= scsi_host_cls_release,
};
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun /**
67*4882a593Smuzhiyun * scsi_host_set_state - Take the given host through the host state model.
68*4882a593Smuzhiyun * @shost: scsi host to change the state of.
69*4882a593Smuzhiyun * @state: state to change to.
70*4882a593Smuzhiyun *
 * Returns zero if successful or an error if the requested
72*4882a593Smuzhiyun * transition is illegal.
73*4882a593Smuzhiyun **/
scsi_host_set_state(struct Scsi_Host * shost,enum scsi_host_state state)74*4882a593Smuzhiyun int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun enum scsi_host_state oldstate = shost->shost_state;
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun if (state == oldstate)
79*4882a593Smuzhiyun return 0;
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun switch (state) {
82*4882a593Smuzhiyun case SHOST_CREATED:
83*4882a593Smuzhiyun /* There are no legal states that come back to
84*4882a593Smuzhiyun * created. This is the manually initialised start
85*4882a593Smuzhiyun * state */
86*4882a593Smuzhiyun goto illegal;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun case SHOST_RUNNING:
89*4882a593Smuzhiyun switch (oldstate) {
90*4882a593Smuzhiyun case SHOST_CREATED:
91*4882a593Smuzhiyun case SHOST_RECOVERY:
92*4882a593Smuzhiyun break;
93*4882a593Smuzhiyun default:
94*4882a593Smuzhiyun goto illegal;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun break;
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun case SHOST_RECOVERY:
99*4882a593Smuzhiyun switch (oldstate) {
100*4882a593Smuzhiyun case SHOST_RUNNING:
101*4882a593Smuzhiyun break;
102*4882a593Smuzhiyun default:
103*4882a593Smuzhiyun goto illegal;
104*4882a593Smuzhiyun }
105*4882a593Smuzhiyun break;
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun case SHOST_CANCEL:
108*4882a593Smuzhiyun switch (oldstate) {
109*4882a593Smuzhiyun case SHOST_CREATED:
110*4882a593Smuzhiyun case SHOST_RUNNING:
111*4882a593Smuzhiyun case SHOST_CANCEL_RECOVERY:
112*4882a593Smuzhiyun break;
113*4882a593Smuzhiyun default:
114*4882a593Smuzhiyun goto illegal;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun break;
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun case SHOST_DEL:
119*4882a593Smuzhiyun switch (oldstate) {
120*4882a593Smuzhiyun case SHOST_CANCEL:
121*4882a593Smuzhiyun case SHOST_DEL_RECOVERY:
122*4882a593Smuzhiyun break;
123*4882a593Smuzhiyun default:
124*4882a593Smuzhiyun goto illegal;
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun break;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun case SHOST_CANCEL_RECOVERY:
129*4882a593Smuzhiyun switch (oldstate) {
130*4882a593Smuzhiyun case SHOST_CANCEL:
131*4882a593Smuzhiyun case SHOST_RECOVERY:
132*4882a593Smuzhiyun break;
133*4882a593Smuzhiyun default:
134*4882a593Smuzhiyun goto illegal;
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun break;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun case SHOST_DEL_RECOVERY:
139*4882a593Smuzhiyun switch (oldstate) {
140*4882a593Smuzhiyun case SHOST_CANCEL_RECOVERY:
141*4882a593Smuzhiyun break;
142*4882a593Smuzhiyun default:
143*4882a593Smuzhiyun goto illegal;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun break;
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun shost->shost_state = state;
148*4882a593Smuzhiyun return 0;
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun illegal:
151*4882a593Smuzhiyun SCSI_LOG_ERROR_RECOVERY(1,
152*4882a593Smuzhiyun shost_printk(KERN_ERR, shost,
153*4882a593Smuzhiyun "Illegal host state transition"
154*4882a593Smuzhiyun "%s->%s\n",
155*4882a593Smuzhiyun scsi_host_state_name(oldstate),
156*4882a593Smuzhiyun scsi_host_state_name(state)));
157*4882a593Smuzhiyun return -EINVAL;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun /**
161*4882a593Smuzhiyun * scsi_remove_host - remove a scsi host
162*4882a593Smuzhiyun * @shost: a pointer to a scsi host to remove
163*4882a593Smuzhiyun **/
void scsi_remove_host(struct Scsi_Host *shost)
{
	unsigned long flags;

	/*
	 * Move the host to SHOST_CANCEL (or SHOST_CANCEL_RECOVERY if error
	 * recovery is in progress).  If neither transition is legal the
	 * host is already being torn down, so bail out.  scan_mutex also
	 * blocks concurrent scanning while we cancel.
	 */
	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_CANCEL))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
			spin_unlock_irqrestore(shost->host_lock, flags);
			mutex_unlock(&shost->scan_mutex);
			return;
		}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Keep the host runtime-resumed while tearing down its devices. */
	scsi_autopm_get_host(shost);
	/* Let any outstanding task-management work finish first. */
	flush_workqueue(shost->tmf_work_q);
	/* Remove all scsi devices/targets attached to this host. */
	scsi_forget_host(shost);
	mutex_unlock(&shost->scan_mutex);
	scsi_proc_host_rm(shost);

	/*
	 * Finalize: SHOST_DEL, or SHOST_DEL_RECOVERY from the cancel-
	 * recovery state.  One of the two must succeed per the state model.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_DEL))
		BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Unregister in reverse order of scsi_add_host_with_dma(). */
	transport_unregister_device(&shost->shost_gendev);
	device_unregister(&shost->shost_dev);
	device_del(&shost->shost_gendev);
}
193*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_remove_host);
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun /**
196*4882a593Smuzhiyun * scsi_add_host_with_dma - add a scsi host with dma device
197*4882a593Smuzhiyun * @shost: scsi host pointer to add
198*4882a593Smuzhiyun * @dev: a struct device of type scsi class
199*4882a593Smuzhiyun * @dma_dev: dma device for the host
200*4882a593Smuzhiyun *
201*4882a593Smuzhiyun * Note: You rarely need to worry about this unless you're in a
 * virtualised host environments, so use the simpler scsi_add_host()
203*4882a593Smuzhiyun * function instead.
204*4882a593Smuzhiyun *
205*4882a593Smuzhiyun * Return value:
206*4882a593Smuzhiyun * 0 on success / != 0 for error
207*4882a593Smuzhiyun **/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
			   struct device *dma_dev)
{
	struct scsi_host_template *sht = shost->hostt;
	int error = -EINVAL;

	shost_printk(KERN_INFO, shost, "%s\n",
			sht->info ? sht->info(shost) : sht->name);

	/* A zero queue depth is no longer a supported configuration. */
	if (!shost->can_queue) {
		shost_printk(KERN_ERR, shost,
			     "can_queue = 0 no longer supported\n");
		goto fail;
	}

	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
				   shost->can_queue);

	error = scsi_init_sense_cache(shost);
	if (error)
		goto fail;

	/* Set up the blk-mq tag set for this host's request queues. */
	error = scsi_mq_setup_tags(shost);
	if (error)
		goto fail;

	/* Fall back to the platform bus when no parent was supplied. */
	if (!shost->shost_gendev.parent)
		shost->shost_gendev.parent = dev ? dev : &platform_bus;
	if (!dma_dev)
		dma_dev = shost->shost_gendev.parent;

	shost->dma_dev = dma_dev;

	/*
	 * Increase usage count temporarily here so that calling
	 * scsi_autopm_put_host() will trigger runtime idle if there is
	 * nothing else preventing suspending the device.
	 */
	pm_runtime_get_noresume(&shost->shost_gendev);
	pm_runtime_set_active(&shost->shost_gendev);
	pm_runtime_enable(&shost->shost_gendev);
	device_enable_async_suspend(&shost->shost_gendev);

	error = device_add(&shost->shost_gendev);
	if (error)
		goto out_disable_runtime_pm;

	scsi_host_set_state(shost, SHOST_RUNNING);
	/* Pin the parent for the lifetime of the registered host. */
	get_device(shost->shost_gendev.parent);

	device_enable_async_suspend(&shost->shost_dev);

	/* shost_dev holds a reference on shost_gendev (dropped in
	 * scsi_host_cls_release()). */
	get_device(&shost->shost_gendev);
	error = device_add(&shost->shost_dev);
	if (error)
		goto out_del_gendev;

	/* Per-host private data for the transport class, if it wants any. */
	if (shost->transportt->host_size) {
		shost->shost_data = kzalloc(shost->transportt->host_size,
					 GFP_KERNEL);
		if (shost->shost_data == NULL) {
			error = -ENOMEM;
			goto out_del_dev;
		}
	}

	/* Optional per-host workqueue requested by the transport class. */
	if (shost->transportt->create_work_queue) {
		snprintf(shost->work_q_name, sizeof(shost->work_q_name),
			 "scsi_wq_%d", shost->host_no);
		shost->work_q = alloc_workqueue("%s",
			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
			1, shost->work_q_name);

		if (!shost->work_q) {
			error = -EINVAL;
			goto out_del_dev;
		}
	}

	error = scsi_sysfs_add_host(shost);
	if (error)
		goto out_del_dev;

	scsi_proc_host_add(shost);
	/* Balance the pm_runtime_get_noresume() above. */
	scsi_autopm_put_host(shost);
	return error;

	/*
	 * Any host allocation in this function will be freed in
	 * scsi_host_dev_release().
	 */
 out_del_dev:
	device_del(&shost->shost_dev);
 out_del_gendev:
	/*
	 * Host state is SHOST_RUNNING so we have to explicitly release
	 * ->shost_dev.
	 */
	put_device(&shost->shost_dev);
	device_del(&shost->shost_gendev);
 out_disable_runtime_pm:
	device_disable_async_suspend(&shost->shost_gendev);
	pm_runtime_disable(&shost->shost_gendev);
	pm_runtime_set_suspended(&shost->shost_gendev);
	pm_runtime_put_noidle(&shost->shost_gendev);
 fail:
	return error;
}
317*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_add_host_with_dma);
318*4882a593Smuzhiyun
/*
 * Final release callback for shost_gendev: tears down everything
 * scsi_host_alloc() set up and frees the Scsi_Host itself.  Runs when
 * the last reference is dropped.
 */
static void scsi_host_dev_release(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct device *parent = dev->parent;

	scsi_proc_hostdir_rm(shost->hostt);

	/* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
	rcu_barrier();

	if (shost->tmf_work_q)
		destroy_workqueue(shost->tmf_work_q);
	if (shost->ehandler)
		kthread_stop(shost->ehandler);
	if (shost->work_q)
		destroy_workqueue(shost->work_q);

	if (shost->shost_state == SHOST_CREATED) {
		/*
		 * Free the shost_dev device name here if scsi_host_alloc()
		 * and scsi_host_put() have been called but neither
		 * scsi_host_add() nor scsi_host_remove() has been called.
		 * This avoids that the memory allocated for the shost_dev
		 * name is leaked.
		 */
		kfree(dev_name(&shost->shost_dev));
	}

	if (shost->tag_set.tags)
		scsi_mq_destroy_tags(shost);

	kfree(shost->shost_data);

	/* Return the host number to the allocator. */
	ida_simple_remove(&host_index_ida, shost->host_no);

	/*
	 * The parent reference was only taken once the host left
	 * SHOST_CREATED (see scsi_add_host_with_dma()), so only drop it
	 * in that case.
	 */
	if (shost->shost_state != SHOST_CREATED)
		put_device(parent);
	kfree(shost);
}
358*4882a593Smuzhiyun
/* Device type for shost_gendev; identifies it in scsi_is_host_device(). */
static struct device_type scsi_host_type = {
	.name =		"scsi_host",
	.release =	scsi_host_dev_release,
};
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun /**
365*4882a593Smuzhiyun * scsi_host_alloc - register a scsi host adapter instance.
366*4882a593Smuzhiyun * @sht: pointer to scsi host template
367*4882a593Smuzhiyun * @privsize: extra bytes to allocate for driver
368*4882a593Smuzhiyun *
369*4882a593Smuzhiyun * Note:
370*4882a593Smuzhiyun * Allocate a new Scsi_Host and perform basic initialization.
371*4882a593Smuzhiyun * The host is not published to the scsi midlayer until scsi_add_host
372*4882a593Smuzhiyun * is called.
373*4882a593Smuzhiyun *
374*4882a593Smuzhiyun * Return value:
375*4882a593Smuzhiyun * Pointer to a new Scsi_Host
376*4882a593Smuzhiyun **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost;
	gfp_t gfp_mask = GFP_KERNEL;
	int index;

	/* ISA DMA drivers need the private area in DMA-able memory. */
	if (sht->unchecked_isa_dma && privsize)
		gfp_mask |= __GFP_DMA;

	/* Driver private data lives directly after the Scsi_Host. */
	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
	if (!shost)
		return NULL;

	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
	shost->shost_state = SHOST_CREATED;
	INIT_LIST_HEAD(&shost->__devices);
	INIT_LIST_HEAD(&shost->__targets);
	INIT_LIST_HEAD(&shost->eh_cmd_q);
	INIT_LIST_HEAD(&shost->starved_list);
	init_waitqueue_head(&shost->host_wait);
	mutex_init(&shost->scan_mutex);

	/* Grab a unique host number; freed in scsi_host_dev_release(). */
	index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
	if (index < 0) {
		kfree(shost);
		return NULL;
	}
	shost->host_no = index;

	shost->dma_channel = 0xff;

	/* These three are default values which can be overridden */
	shost->max_channel = 0;
	shost->max_id = 8;
	shost->max_lun = 8;

	/* Give each shost a default transportt */
	shost->transportt = &blank_transport_template;

	/*
	 * All drivers right now should be able to handle 12 byte
	 * commands.  Every so often there are requests for 16 byte
	 * commands, but individual low-level drivers need to certify that
	 * they actually do something sensible with such commands.
	 */
	shost->max_cmd_len = 12;
	shost->hostt = sht;
	shost->this_id = sht->this_id;
	shost->can_queue = sht->can_queue;
	shost->sg_tablesize = sht->sg_tablesize;
	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
	shost->no_write_same = sht->no_write_same;
	shost->host_tagset = sht->host_tagset;

	/*
	 * Translate the module-wide eh_deadline (seconds) into jiffies,
	 * clamping to INT_MAX; hosts without a reset handler get no
	 * deadline at all.
	 */
	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
		shost->eh_deadline = -1;
	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
		shost_printk(KERN_WARNING, shost,
			     "eh_deadline %u too large, setting to %u\n",
			     shost_eh_deadline, INT_MAX / HZ);
		shost->eh_deadline = INT_MAX;
	} else
		shost->eh_deadline = shost_eh_deadline * HZ;

	if (sht->supported_mode == MODE_UNKNOWN)
		/* means we didn't set it ... default to INITIATOR */
		shost->active_mode = MODE_INITIATOR;
	else
		shost->active_mode = sht->supported_mode;

	if (sht->max_host_blocked)
		shost->max_host_blocked = sht->max_host_blocked;
	else
		shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

	/*
	 * If the driver imposes no hard sector transfer limit, start at
	 * machine infinity initially.
	 */
	if (sht->max_sectors)
		shost->max_sectors = sht->max_sectors;
	else
		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

	if (sht->max_segment_size)
		shost->max_segment_size = sht->max_segment_size;
	else
		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;

	/*
	 * assume a 4GB boundary, if not set
	 */
	if (sht->dma_boundary)
		shost->dma_boundary = sht->dma_boundary;
	else
		shost->dma_boundary = 0xffffffff;

	if (sht->virt_boundary_mask)
		shost->virt_boundary_mask = sht->virt_boundary_mask;

	/* Core device, parent of shost_dev; its release frees the host. */
	device_initialize(&shost->shost_gendev);
	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
	shost->shost_gendev.bus = &scsi_bus_type;
	shost->shost_gendev.type = &scsi_host_type;

	/* Class device exposed under /sys/class/scsi_host. */
	device_initialize(&shost->shost_dev);
	shost->shost_dev.parent = &shost->shost_gendev;
	shost->shost_dev.class = &shost_class;
	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
	shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;

	/* Per-host error-handler thread; stopped in dev_release. */
	shost->ehandler = kthread_run(scsi_error_handler, shost,
			"scsi_eh_%d", shost->host_no);
	if (IS_ERR(shost->ehandler)) {
		shost_printk(KERN_WARNING, shost,
			"error handler thread failed to spawn, error = %ld\n",
			PTR_ERR(shost->ehandler));
		shost->ehandler = NULL;
		goto fail;
	}

	/* Ordered workqueue for task-management (abort/reset) work. */
	shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
					   1, shost->host_no);
	if (!shost->tmf_work_q) {
		shost_printk(KERN_WARNING, shost,
			     "failed to create tmf workq\n");
		goto fail;
	}
	scsi_proc_hostdir_add(shost->hostt);
	return shost;
 fail:
	/*
	 * Host state is still SHOST_CREATED and that is enough to release
	 * ->shost_gendev. scsi_host_dev_release() will free
	 * dev_name(&shost->shost_dev).
	 */
	put_device(&shost->shost_gendev);

	return NULL;
}
521*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_host_alloc);
522*4882a593Smuzhiyun
__scsi_host_match(struct device * dev,const void * data)523*4882a593Smuzhiyun static int __scsi_host_match(struct device *dev, const void *data)
524*4882a593Smuzhiyun {
525*4882a593Smuzhiyun struct Scsi_Host *p;
526*4882a593Smuzhiyun const unsigned short *hostnum = data;
527*4882a593Smuzhiyun
528*4882a593Smuzhiyun p = class_to_shost(dev);
529*4882a593Smuzhiyun return p->host_no == *hostnum;
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun
532*4882a593Smuzhiyun /**
533*4882a593Smuzhiyun * scsi_host_lookup - get a reference to a Scsi_Host by host no
534*4882a593Smuzhiyun * @hostnum: host number to locate
535*4882a593Smuzhiyun *
536*4882a593Smuzhiyun * Return value:
537*4882a593Smuzhiyun * A pointer to located Scsi_Host or NULL.
538*4882a593Smuzhiyun *
539*4882a593Smuzhiyun * The caller must do a scsi_host_put() to drop the reference
540*4882a593Smuzhiyun * that scsi_host_get() took. The put_device() below dropped
541*4882a593Smuzhiyun * the reference from class_find_device().
542*4882a593Smuzhiyun **/
scsi_host_lookup(unsigned short hostnum)543*4882a593Smuzhiyun struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun struct device *cdev;
546*4882a593Smuzhiyun struct Scsi_Host *shost = NULL;
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun cdev = class_find_device(&shost_class, NULL, &hostnum,
549*4882a593Smuzhiyun __scsi_host_match);
550*4882a593Smuzhiyun if (cdev) {
551*4882a593Smuzhiyun shost = scsi_host_get(class_to_shost(cdev));
552*4882a593Smuzhiyun put_device(cdev);
553*4882a593Smuzhiyun }
554*4882a593Smuzhiyun return shost;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_host_lookup);
557*4882a593Smuzhiyun
558*4882a593Smuzhiyun /**
559*4882a593Smuzhiyun * scsi_host_get - inc a Scsi_Host ref count
560*4882a593Smuzhiyun * @shost: Pointer to Scsi_Host to inc.
561*4882a593Smuzhiyun **/
scsi_host_get(struct Scsi_Host * shost)562*4882a593Smuzhiyun struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
563*4882a593Smuzhiyun {
564*4882a593Smuzhiyun if ((shost->shost_state == SHOST_DEL) ||
565*4882a593Smuzhiyun !get_device(&shost->shost_gendev))
566*4882a593Smuzhiyun return NULL;
567*4882a593Smuzhiyun return shost;
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_host_get);
570*4882a593Smuzhiyun
/*
 * blk_mq_tagset_busy_iter() callback: bump *(int *)data for every
 * request whose command is marked in flight.  Always continues iterating.
 */
static bool scsi_host_check_in_flight(struct request *rq, void *data,
				      bool reserved)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	int *in_flight = data;

	if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		++*in_flight;

	return true;
}
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun /**
584*4882a593Smuzhiyun * scsi_host_busy - Return the host busy counter
 * @shost: Pointer to Scsi_Host whose in-flight command count is wanted.
586*4882a593Smuzhiyun **/
scsi_host_busy(struct Scsi_Host * shost)587*4882a593Smuzhiyun int scsi_host_busy(struct Scsi_Host *shost)
588*4882a593Smuzhiyun {
589*4882a593Smuzhiyun int cnt = 0;
590*4882a593Smuzhiyun
591*4882a593Smuzhiyun blk_mq_tagset_busy_iter(&shost->tag_set,
592*4882a593Smuzhiyun scsi_host_check_in_flight, &cnt);
593*4882a593Smuzhiyun return cnt;
594*4882a593Smuzhiyun }
595*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_host_busy);
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun /**
598*4882a593Smuzhiyun * scsi_host_put - dec a Scsi_Host ref count
599*4882a593Smuzhiyun * @shost: Pointer to Scsi_Host to dec.
600*4882a593Smuzhiyun **/
void scsi_host_put(struct Scsi_Host *shost)
{
	/* May trigger scsi_host_dev_release() if this was the last ref. */
	put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);
605*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_host_put);
606*4882a593Smuzhiyun
/* Register the scsi_host class; called once at subsystem init. */
int scsi_init_hosts(void)
{
	return class_register(&shost_class);
}
611*4882a593Smuzhiyun
/* Unregister the scsi_host class and release the host-number IDA. */
void scsi_exit_hosts(void)
{
	class_unregister(&shost_class);
	ida_destroy(&host_index_ida);
}
617*4882a593Smuzhiyun
/* True if @dev is a Scsi_Host's shost_gendev (checked by device type). */
int scsi_is_host_device(const struct device *dev)
{
	return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);
622*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_is_host_device);
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun /**
625*4882a593Smuzhiyun * scsi_queue_work - Queue work to the Scsi_Host workqueue.
626*4882a593Smuzhiyun * @shost: Pointer to Scsi_Host.
627*4882a593Smuzhiyun * @work: Work to queue for execution.
628*4882a593Smuzhiyun *
629*4882a593Smuzhiyun * Return value:
630*4882a593Smuzhiyun * 1 - work queued for execution
631*4882a593Smuzhiyun * 0 - work is already queued
632*4882a593Smuzhiyun * -EINVAL - work queue doesn't exist
633*4882a593Smuzhiyun **/
scsi_queue_work(struct Scsi_Host * shost,struct work_struct * work)634*4882a593Smuzhiyun int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
635*4882a593Smuzhiyun {
636*4882a593Smuzhiyun if (unlikely(!shost->work_q)) {
637*4882a593Smuzhiyun shost_printk(KERN_ERR, shost,
638*4882a593Smuzhiyun "ERROR: Scsi host '%s' attempted to queue scsi-work, "
639*4882a593Smuzhiyun "when no workqueue created.\n", shost->hostt->name);
640*4882a593Smuzhiyun dump_stack();
641*4882a593Smuzhiyun
642*4882a593Smuzhiyun return -EINVAL;
643*4882a593Smuzhiyun }
644*4882a593Smuzhiyun
645*4882a593Smuzhiyun return queue_work(shost->work_q, work);
646*4882a593Smuzhiyun }
647*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(scsi_queue_work);
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun /**
650*4882a593Smuzhiyun * scsi_flush_work - Flush a Scsi_Host's workqueue.
651*4882a593Smuzhiyun * @shost: Pointer to Scsi_Host.
652*4882a593Smuzhiyun **/
scsi_flush_work(struct Scsi_Host * shost)653*4882a593Smuzhiyun void scsi_flush_work(struct Scsi_Host *shost)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun if (!shost->work_q) {
656*4882a593Smuzhiyun shost_printk(KERN_ERR, shost,
657*4882a593Smuzhiyun "ERROR: Scsi host '%s' attempted to flush scsi-work, "
658*4882a593Smuzhiyun "when no workqueue created.\n", shost->hostt->name);
659*4882a593Smuzhiyun dump_stack();
660*4882a593Smuzhiyun return;
661*4882a593Smuzhiyun }
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun flush_workqueue(shost->work_q);
664*4882a593Smuzhiyun }
665*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(scsi_flush_work);
666*4882a593Smuzhiyun
/*
 * Tag-set iterator callback: unmap and complete one in-flight command
 * with the status passed via *(int *)data (shifted into the host-byte
 * position of scmd->result).
 */
static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	scsi_dma_unmap(scmd);
	scmd->result = *(int *)data << 16;
	scmd->scsi_done(scmd);
	return true;
}
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun /**
679*4882a593Smuzhiyun * scsi_host_complete_all_commands - Terminate all running commands
680*4882a593Smuzhiyun * @shost: Scsi Host on which commands should be terminated
681*4882a593Smuzhiyun * @status: Status to be set for the terminated commands
682*4882a593Smuzhiyun *
683*4882a593Smuzhiyun * There is no protection against modification of the number
684*4882a593Smuzhiyun * of outstanding commands. It is the responsibility of the
685*4882a593Smuzhiyun * caller to ensure that concurrent I/O submission and/or
686*4882a593Smuzhiyun * completion is stopped when calling this function.
687*4882a593Smuzhiyun */
void scsi_host_complete_all_commands(struct Scsi_Host *shost, int status)
{
	/* Walk every busy request in the tag set and complete it. */
	blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
				&status);
}
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
693*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
694*4882a593Smuzhiyun
/* Adapter context: pairs the caller's scsi_cmnd callback with its data. */
struct scsi_host_busy_iter_data {
	bool (*fn)(struct scsi_cmnd *, void *, bool);
	void *priv;
};
699*4882a593Smuzhiyun
/*
 * Adapts the request-based blk_mq_tagset_busy_iter() callback to the
 * scsi_cmnd-based callback supplied to scsi_host_busy_iter().
 */
static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
				     bool reserved)
{
	struct scsi_host_busy_iter_data *d = priv;

	return d->fn(blk_mq_rq_to_pdu(req), d->priv, reserved);
}
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun /**
710*4882a593Smuzhiyun * scsi_host_busy_iter - Iterate over all busy commands
711*4882a593Smuzhiyun * @shost: Pointer to Scsi_Host.
712*4882a593Smuzhiyun * @fn: Function to call on each busy command
713*4882a593Smuzhiyun * @priv: Data pointer passed to @fn
714*4882a593Smuzhiyun *
715*4882a593Smuzhiyun * If locking against concurrent command completions is required
 * it has to be provided by the caller
717*4882a593Smuzhiyun **/
void scsi_host_busy_iter(struct Scsi_Host *shost,
			 bool (*fn)(struct scsi_cmnd *, void *, bool),
			 void *priv)
{
	/* Bundle callback + cookie so the request-level adapter can
	 * recover them from the single void * the iterator passes. */
	struct scsi_host_busy_iter_data iter_data = {
		.fn = fn,
		.priv = priv,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
				&iter_data);
}
EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
730*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
731