// SPDX-License-Identifier: GPL-2.0
/*
 * scsi_scan.c
 *
 * Copyright (C) 2000 Eric Youngdale,
 * Copyright (C) 2002 Patrick Mansfield
 *
 * The general scanning/probing algorithm is as follows; exceptions are
 * made to it depending on device-specific flags, compilation options, and
 * global variable (boot or module load time) settings.
 *
 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
 * device attached, a scsi_device is allocated and setup for it.
 *
 * For every id of every channel on the given host:
 *
 *	Scan LUN 0; if the target responds to LUN 0 (even if there is no
 *	device or storage attached to LUN 0):
 *
 *		If LUN 0 has a device attached, allocate and setup a
 *		scsi_device for it.
 *
 *		If target is SCSI-3 or up, issue a REPORT LUNS, and scan
 *		all of the LUNs returned by the REPORT LUNS; else,
 *		sequentially scan LUNs up until some maximum is reached,
 *		or a LUN is seen that cannot have a device attached to it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
	" SCSI scanning, some SCSI devices might not be configured\n"

/*
 * Default timeout
 */
#define SCSI_TIMEOUT (2*HZ)
#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)

/*
 * Prefix values for the SCSI id's (stored in sysfs name field)
 */
#define SCSI_UID_SER_NUM 'S'
#define SCSI_UID_UNKNOWN 'Z'

/*
 * Return values of some of the scanning functions.
 *
 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
 * includes allocation or general failures preventing IO from being sent.
 *
 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
 * on the given LUN.
 *
 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
 * given LUN.
 */
#define SCSI_SCAN_NO_RESPONSE		0
#define SCSI_SCAN_TARGET_PRESENT	1
#define SCSI_SCAN_LUN_PRESENT		2

static const char *scsi_null_device_strs = "nullnullnullnull";

#define MAX_SCSI_LUNS	512

static u64 max_scsi_luns = MAX_SCSI_LUNS;

module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
		 "last scsi LUN (should be between 1 and 2^64-1)");

#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
#else
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif

char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;

module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
		    S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
		 "Setting to 'manual' disables automatic scanning, but allows "
		 "for manual device scan via the 'scan' sysfs attribute.");

static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;

module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
		 " Default is 20. Some devices may need more; most need less.");

/* This lock protects only this list */
static DEFINE_SPINLOCK(async_scan_lock);
static LIST_HEAD(scanning_hosts);

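/*
 * Bookkeeping for one entry on the scanning_hosts list.  Each in-flight
 * asynchronous host scan holds an entry here, and prev_finished is
 * completed once every scan that was started before it has finished.
 * scsi_complete_async_scans() below relies on this: it queues a dummy
 * entry (shost == NULL) at the tail and waits on its prev_finished.
 */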
struct async_scan_data {
	struct list_head list;
	struct Scsi_Host *shost;
	struct completion prev_finished;
};

/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts which
 * started scanning after this function was called may or may not have
 * finished.
 */
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/* If we can't get memory immediately, that's OK.  Just
		 * sleep a little.  Even if we never get memory, the async
		 * scans will finish eventually.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}

/**
 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
 * @sdev: scsi device to send command to
 * @result: area to store the result of the MODE SENSE
 *
 * Description:
 *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
 *     Called for BLIST_KEY devices.
 **/
static void scsi_unlock_floptical(struct scsi_device *sdev,
				  unsigned char *result)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];

	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
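	/*
	 * Build a six-byte MODE SENSE CDB by hand: byte 2 asks for page
	 * 0x2e (a vendor-specific page) and byte 4 requests 0x2a bytes,
	 * matching the buffer length passed to scsi_execute_req() below.
	 */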
	scsi_cmd[0] = MODE_SENSE;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0x2e;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 0x2a;	/* size */
	scsi_cmd[5] = 0;
	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
			 SCSI_TIMEOUT, 3, NULL);
}

/**
 * scsi_alloc_sdev - allocate and setup a scsi_Device
 * @starget: which target to allocate a &scsi_device for
 * @lun: which lun
 * @hostdata: usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_Device.
 *     Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
 *     adds scsi_Device to the appropriate list.
 *
 * Return value:
 *     scsi_Device pointer, or NULL on failure.
 **/
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	struct scsi_device *sdev;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_KERNEL);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	mutex_init(&sdev->state_mutex);
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level driver could use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't
	 */
	sdev->borken = 1;

	sdev->request_queue = scsi_mq_alloc_queue(sdev);
	if (!sdev->request_queue) {
		/* release fn is set up in scsi_sysfs_device_initialise, so
		 * have to free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
	sdev->request_queue->queuedata = sdev;

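	/*
	 * Start with the host's default per-LUN queue depth (or 1 if the
	 * LLD did not set cmd_per_lun); drivers may adjust this later,
	 * e.g. from their slave_configure() callback.
	 */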
	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
					sdev->host->cmd_per_lun : 1);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * console with alloc failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}

static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}

static void scsi_target_dev_release(struct device *dev)
{
	struct device *parent = dev->parent;
	struct scsi_target *starget = to_scsi_target(dev);

	kfree(starget);
	put_device(parent);
}

static struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};

int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);

static struct scsi_target *__scsi_find_target(struct device *parent,
					      int channel, uint id)
{
	struct scsi_target *starget, *found_starget = NULL;
	struct Scsi_Host *shost = dev_to_shost(parent);
	/*
	 * Search for an existing target for this sdev.
	 */
	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->id == id &&
		    starget->channel == channel) {
			found_starget = starget;
			break;
		}
	}
	if (found_starget)
		get_device(&found_starget->dev);

	return found_starget;
}

/**
 * scsi_target_reap_ref_release - remove target from visibility
 * @kref: the reap_ref in the target being released
 *
 * Called on last put of reap_ref, which is the indication that no device
 * under this target is visible anymore, so render the target invisible in
 * sysfs.  Note: we have to be in user context here because the target reaps
 * should be done in places where the scsi device visibility is being removed.
 */
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * if we get here and the target is still in a CREATED state that
	 * means it was allocated but never made visible (because a scan
	 * turned up no LUNs), so don't call device_del() on it.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}

static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}

/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (need not be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one exists, provided it hasn't already
 * gone into STARGET_DEL state, otherwise allocate a new target.
 *
 * The target is returned with an incremented reference, so the caller
 * is responsible for both reaping and doing a last put
 */
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if (error) {
			if (error != -ENXIO)
				dev_err(dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

found:
	/*
	 * release routine already fired if kref is zero, so if we can still
	 * take the reference, the target must be alive.  If we can't, it must
	 * be dying and we need to wait for a new target
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		put_device(dev);
		return found_target;
	}
	/*
	 * Unfortunately, we found a dying target; need to wait until it's
	 * dead before we can get a new one.  There is an anomaly here.  We
	 * *should* call scsi_target_reap() to balance the kref_get() of the
	 * reap_ref above.  However, since the target is being released, it's
	 * already invisible and the reap_ref is irrelevant.  If we call
	 * scsi_target_reap() we might spuriously do another device_del() on
	 * an already invisible target.
	 */
	put_device(&found_target->dev);
	/*
	 * length of time is irrelevant here, we just want to yield the CPU
	 * for a tick to avoid busy waiting for the target to die.
	 */
	msleep(1);
	goto retry;
}

/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target;
 * it checks atomically that nothing is using the target and removes
 * it if so.
 */
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * serious problem if this triggers: STARGET_DEL is only set if the
	 * reap_ref drops to zero, so we're trying to do another final put
	 * on an already released kref
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}

/**
 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  Exception: a NUL character is interpreted as a
 *	string terminator, so all the following characters are set to
 *	spaces.
 **/
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int terminated = 0;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = 1;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);

/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len: len of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY;
 *
 *     If the INQUIRY is successful, zero is returned and the
 *     INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
 *     are copied to the scsi_device; any flags value is stored in *@bflags.
 **/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, blist_flags_t *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if (driver_byte(result) == DRIVER_SENSE &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

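		/*
		 * Byte 4 of the INQUIRY data is the ADDITIONAL LENGTH field
		 * (bytes following byte 4), so the total length the device
		 * claims to return is that value plus the 5-byte header.
		 */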
		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 *
		 * XXX add a bflags to scsi_device, and replace the
		 * corresponding bit fields in scsi_device, so bflags
		 * need not be passed as an argument.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
						&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed. "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and transfers
		 * the same amount as we successfully got in the first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * XXX Abort if the response length is less than 36? If less than
	 * 32, the lookup of the device flags (above) could be invalid,
	 * and it would be possible to take an incorrect action - we do
	 * not want to hang because of a short INQUIRY.  On the flip side,
	 * if the device is spun down or becoming ready (and so it gives a
	 * short INQUIRY), an abort here prevents any further use of the
	 * device, including spin up.
	 *
	 * On the whole, the best approach seems to be to assume the first
	 * 36 bytes are valid no matter what the device says.  That's
	 * better than copying < 36 bytes to the inquiry-result buffer
	 * and displaying garbage for the Vendor, Product, or Revision
	 * strings.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				     "scsi scan: INQUIRY result too short (%d),"
				     " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * Related to the above issue:
	 *
	 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
	 * and if not ready, sent a START_STOP to start (maybe spin up) and
	 * then send the INQUIRY again, since the INQUIRY can change after
	 * a device is initialized.
	 *
	 * Ideally, start a device if explicitly asked to do so.  This
	 * assumes that a device is spun up on power on, spun down on
	 * request, and then spun up on request.
	 */

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
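	/*
	 * The ANSI version field is INQUIRY byte 2, bits 0-2.  The
	 * scsi_level enum in scsi.h has an extra SCSI_1_CCS value between
	 * SCSI_1 and SCSI_2, so reported versions of 2 and above are
	 * shifted up by one, and version 1 with a CCS response data
	 * format (byte 3, low nibble == 1) maps to SCSI_1_CCS.
	 */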
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the transport requires it,
	 * store the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}

/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flag
 * @async:	1 if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev.  Optionally set fields based
 *     on values in *@bflags.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
			blist_flags_t *bflags, int async)
{
	int ret;

	/*
	 * XXX do not save the inquiry, since it can change underneath us,
	 * save just vendor/model/rev.
	 *
	 * Rather than save it and have an ioctl that retrieves the saved
	 * value, have an ioctl that executes the same INQUIRY code used
	 * in scsi_probe_lun, let user level programs doing INQUIRY
	 * scanning run at their own risk, or supply a user level program
	 * that can correctly scan.
	 */

	/*
	 * Copy at least 36 bytes of INQUIRY data, so that we don't
	 * dereference unallocated memory when accessing the Vendor,
	 * Product, and Revision strings.  Badly behaved devices may set
	 * the INQUIRY Additional Length byte to a small value, indicating
	 * these strings are invalid, but often they contain plausible data
	 * nonetheless.  It doesn't matter if the device sent < 36 bytes
	 * total, since scsi_probe_lun() initializes inq_result with 0s.
	 */
	sdev->inquiry = kmemdup(inq_result,
				max_t(size_t, sdev->inquiry_len, 36),
				GFP_KERNEL);
	if (sdev->inquiry == NULL)
		return SCSI_SCAN_NO_RESPONSE;

	sdev->vendor = (char *) (sdev->inquiry + 8);
	sdev->model = (char *) (sdev->inquiry + 16);
	sdev->rev = (char *) (sdev->inquiry + 32);

	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
		/*
		 * sata emulation layer device.  This is a hack to work around
		 * the SATL power management specifications which state that
		 * when the SATL detects the device has gone into standby
		 * mode, it shall respond with NOT READY.
		 */
		sdev->allow_restart = 1;
	}

	if (*bflags & BLIST_ISROM) {
		sdev->type = TYPE_ROM;
		sdev->removable = 1;
	} else {
		sdev->type = (inq_result[0] & 0x1f);
		sdev->removable = (inq_result[1] & 0x80) >> 7;

		/*
		 * some devices may respond with wrong type for
		 * well-known logical units.  Force well-known type
		 * to enumerate them correctly.
		 */
		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
			sdev_printk(KERN_WARNING, sdev,
				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
				__func__, sdev->type, (unsigned int)sdev->lun);
			sdev->type = TYPE_WLUN;
		}

	}

	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
		/* RBC and MMC devices can return SCSI-3 compliance and yet
		 * still not support REPORT LUNS, so make them act as
		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
		 * specifically set */
		if ((*bflags & BLIST_REPORTLUN2) == 0)
			*bflags |= BLIST_NOREPORTLUN;
	}

	/*
	 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
	 * spec says: The device server is capable of supporting the
	 * specified peripheral device type on this logical unit.  However,
	 * the physical device is not currently connected to this logical
	 * unit.
	 *
	 * The above is vague, as it implies that we could treat 001 and
	 * 011 the same. Stay compatible with previous code, and create a
	 * scsi_device for a PQ of 1
	 *
	 * Don't set the device offline here; rather let the upper
	 * level drivers eval the PQ to decide whether they should
	 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
	 */

	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
	sdev->lockable = sdev->removable;
	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);

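	/*
	 * Parallel-SCSI transfer capabilities from the standard INQUIRY
	 * data: PPR negotiation is assumed for SCSI-3 devices or when
	 * byte 56 advertises DT clocking; byte 7 bits 5-6 (WBUS16/WBUS32)
	 * enable wide transfer negotiation and bit 4 (SYNC) synchronous
	 * transfer negotiation.
	 */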
	if (sdev->scsi_level >= SCSI_3 ||
	    (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
		sdev->ppr = 1;
	if (inq_result[7] & 0x60)
		sdev->wdtr = 1;
	if (inq_result[7] & 0x10)
		sdev->sdtr = 1;

	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
			"ANSI: %d%s\n", scsi_device_type(sdev->type),
			sdev->vendor, sdev->model, sdev->rev,
			sdev->inq_periph_qual, inq_result[2] & 0x07,
			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");

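	/*
	 * Bit 1 of INQUIRY byte 7 is CmdQue: the device claims support for
	 * tagged command queueing, unless a blacklist entry (BLIST_NOTQ)
	 * overrides it.
	 */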
	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
	    !(*bflags & BLIST_NOTQ)) {
		sdev->tagged_supported = 1;
		sdev->simple_tags = 1;
	}

	/*
	 * Some devices (Texel CD ROM drives) have handshaking problems
	 * when used with the Seagate controllers. borken is initialized
	 * to 1, and then set it to 0 here.
	 */
	if ((*bflags & BLIST_BORKEN) == 0)
		sdev->borken = 0;

	if (*bflags & BLIST_NO_ULD_ATTACH)
		sdev->no_uld_attach = 1;

	/*
	 * Apparently some really broken devices (contrary to the SCSI
	 * standards) need to be selected without asserting ATN
	 */
	if (*bflags & BLIST_SELECT_NO_ATN)
		sdev->select_no_atn = 1;

	/*
	 * Maximum 512 sector transfer length
	 * broken RA4x00 Compaq Disk Array
	 */
	if (*bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	/*
	 * Max 1024 sector transfer length for targets that report incorrect
	 * max/optimal lengths and relied on the old block layer safe default
	 */
	else if (*bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);

	/*
	 * Some devices may not want to have a start command automatically
	 * issued when a device is added.
	 */
	if (*bflags & BLIST_NOSTARTONADD)
		sdev->no_start_on_add = 1;

	if (*bflags & BLIST_SINGLELUN)
		scsi_target(sdev)->single_lun = 1;

	sdev->use_10_for_rw = 1;

	/* some devices don't like REPORT SUPPORTED OPERATION CODES
	 * and will simply timeout causing sd_mod init to take a very
	 * very long time */
	if (*bflags & BLIST_NO_RSOC)
		sdev->no_report_opcodes = 1;

	/* set the device running here so that slave configure
	 * may do I/O */
	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (ret)
		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
	mutex_unlock(&sdev->state_mutex);

	if (ret) {
		sdev_printk(KERN_ERR, sdev,
			    "in wrong state %s to complete scan\n",
			    scsi_device_state_name(sdev->sdev_state));
		return SCSI_SCAN_NO_RESPONSE;
	}

	if (*bflags & BLIST_NOT_LOCKABLE)
		sdev->lockable = 0;

	if (*bflags & BLIST_RETRY_HWERROR)
		sdev->retry_hwerror = 1;

	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;

	if (*bflags & BLIST_UNMAP_LIMIT_WS)
		sdev->unmap_limit_for_ws = 1;

	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;

	if (*bflags & BLIST_TRY_VPD_PAGES)
		sdev->try_vpd_pages = 1;
	else if (*bflags & BLIST_SKIP_VPD_PAGES)
		sdev->skip_vpd_pages = 1;

	transport_configure_device(&sdev->sdev_gendev);

	if (sdev->host->hostt->slave_configure) {
		ret = sdev->host->hostt->slave_configure(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * console with alloc failure messages
			 */
			if (ret != -ENXIO) {
				sdev_printk(KERN_ERR, sdev,
					    "failed to configure device\n");
			}
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (sdev->scsi_level >= SCSI_3)
		scsi_attach_vpd(sdev);

	sdev->max_queue_depth = sdev->queue_depth;
	sdev->sdev_bflags = *bflags;

	/*
	 * Ok, the device is now all set up, we can
	 * register it and tell the rest of the kernel
	 * about it.
	 */
	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
		return SCSI_SCAN_NO_RESPONSE;

	return SCSI_SCAN_LUN_PRESENT;
}

#ifdef CONFIG_SCSI_LOGGING
/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned term = 0, idx;

	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
		if (inq[idx+first] > ' ') {
			buf[idx] = inq[idx+first];
			term = idx+1;
		} else {
			buf[idx] = ' ';
		}
	}
	buf[term] = 0;
	return buf;
}
#endif

/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:	if not equal to SCSI_SCAN_INITIAL skip some code only
 *		needed on first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *
 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *         attached at the LUN
 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, blist_flags_t *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	blist_flags_t bflags;
	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * The rescan flag is used as an optimization, the first scan of a
	 * host adapter calls into here with rescan == 0.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

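	/*
	 * Buffer for the INQUIRY response; hosts limited to ISA DMA need
	 * it allocated from the low (<16 MB) DMA zone.
	 */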
1082*4882a593Smuzhiyun result = kmalloc(result_len, GFP_KERNEL |
1083*4882a593Smuzhiyun ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
1084*4882a593Smuzhiyun if (!result)
1085*4882a593Smuzhiyun goto out_free_sdev;
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun if (scsi_probe_lun(sdev, result, result_len, &bflags))
1088*4882a593Smuzhiyun goto out_free_result;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun if (bflagsp)
1091*4882a593Smuzhiyun *bflagsp = bflags;
1092*4882a593Smuzhiyun /*
1093*4882a593Smuzhiyun * result contains valid SCSI INQUIRY data.
1094*4882a593Smuzhiyun */
1095*4882a593Smuzhiyun if ((result[0] >> 5) == 3) {
1096*4882a593Smuzhiyun /*
1097*4882a593Smuzhiyun * For a Peripheral qualifier 3 (011b), the SCSI
1098*4882a593Smuzhiyun * spec says: The device server is not capable of
1099*4882a593Smuzhiyun * supporting a physical device on this logical
1100*4882a593Smuzhiyun * unit.
1101*4882a593Smuzhiyun *
1102*4882a593Smuzhiyun * For disks, this implies that there is no
1103*4882a593Smuzhiyun * logical disk configured at sdev->lun, but there
1104*4882a593Smuzhiyun * is a target id responding.
1105*4882a593Smuzhiyun */
1106*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1107*4882a593Smuzhiyun " peripheral qualifier of 3, device not"
1108*4882a593Smuzhiyun " added\n"))
1109*4882a593Smuzhiyun if (lun == 0) {
1110*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(1, {
1111*4882a593Smuzhiyun unsigned char vend[9];
1112*4882a593Smuzhiyun unsigned char mod[17];
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun sdev_printk(KERN_INFO, sdev,
1115*4882a593Smuzhiyun "scsi scan: consider passing scsi_mod."
1116*4882a593Smuzhiyun "dev_flags=%s:%s:0x240 or 0x1000240\n",
1117*4882a593Smuzhiyun scsi_inq_str(vend, result, 8, 16),
1118*4882a593Smuzhiyun scsi_inq_str(mod, result, 16, 32));
1119*4882a593Smuzhiyun });
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun res = SCSI_SCAN_TARGET_PRESENT;
1124*4882a593Smuzhiyun goto out_free_result;
1125*4882a593Smuzhiyun }
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun /*
1128*4882a593Smuzhiyun * Some targets may set slight variations of PQ and PDT to signal
1129*4882a593Smuzhiyun * that no LUN is present, so don't add sdev in these cases.
1130*4882a593Smuzhiyun * Some specific examples are:
1131*4882a593Smuzhiyun * 1) NetApp targets: return PQ=1, PDT=0x1f
1132*4882a593Smuzhiyun * 2) IBM/2145 targets: return PQ=1, PDT=0
1133*4882a593Smuzhiyun * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1134*4882a593Smuzhiyun * in the UFI 1.0 spec (we cannot rely on reserved bits).
1135*4882a593Smuzhiyun *
1136*4882a593Smuzhiyun * References:
1137*4882a593Smuzhiyun * 1) SCSI SPC-3, pp. 145-146
1138*4882a593Smuzhiyun * PQ=1: "A peripheral device having the specified peripheral
1139*4882a593Smuzhiyun * device type is not connected to this logical unit. However, the
1140*4882a593Smuzhiyun * device server is capable of supporting the specified peripheral
1141*4882a593Smuzhiyun * device type on this logical unit."
1142*4882a593Smuzhiyun * PDT=0x1f: "Unknown or no device type"
1143*4882a593Smuzhiyun * 2) USB UFI 1.0, p. 20
1144*4882a593Smuzhiyun * PDT=00h Direct-access device (floppy)
1145*4882a593Smuzhiyun * PDT=1Fh none (no FDD connected to the requested logical unit)
1146*4882a593Smuzhiyun */
1147*4882a593Smuzhiyun if (((result[0] >> 5) == 1 ||
1148*4882a593Smuzhiyun (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
1149*4882a593Smuzhiyun !scsi_is_wlun(lun)) {
1150*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1151*4882a593Smuzhiyun "scsi scan: peripheral device type"
1152*4882a593Smuzhiyun " of 31, no device added\n"));
1153*4882a593Smuzhiyun res = SCSI_SCAN_TARGET_PRESENT;
1154*4882a593Smuzhiyun goto out_free_result;
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun
1157*4882a593Smuzhiyun res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1158*4882a593Smuzhiyun if (res == SCSI_SCAN_LUN_PRESENT) {
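/*
 * BLIST_KEY marks "floptical" drives whose media stays locked
 * until a vendor-specific unlock command is sent; clear the
 * lockable flag and issue that unlock sequence now.
 */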
1159*4882a593Smuzhiyun if (bflags & BLIST_KEY) {
1160*4882a593Smuzhiyun sdev->lockable = 0;
1161*4882a593Smuzhiyun scsi_unlock_floptical(sdev, result);
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun out_free_result:
1166*4882a593Smuzhiyun kfree(result);
1167*4882a593Smuzhiyun out_free_sdev:
1168*4882a593Smuzhiyun if (res == SCSI_SCAN_LUN_PRESENT) {
1169*4882a593Smuzhiyun if (sdevp) {
1170*4882a593Smuzhiyun if (scsi_device_get(sdev) == 0) {
1171*4882a593Smuzhiyun *sdevp = sdev;
1172*4882a593Smuzhiyun } else {
1173*4882a593Smuzhiyun __scsi_remove_device(sdev);
1174*4882a593Smuzhiyun res = SCSI_SCAN_NO_RESPONSE;
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun } else
1178*4882a593Smuzhiyun __scsi_remove_device(sdev);
1179*4882a593Smuzhiyun out:
1180*4882a593Smuzhiyun return res;
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun /**
1184*4882a593Smuzhiyun * scsi_sequential_lun_scan - sequentially scan a SCSI target
1185*4882a593Smuzhiyun * @starget: pointer to target structure to scan
1186*4882a593Smuzhiyun * @bflags: black/white list flag for LUN 0
1187*4882a593Smuzhiyun * @scsi_level: Which version of the standard does this device adhere to
1188*4882a593Smuzhiyun * @rescan: passed to scsi_probe_and_add_lun()
1189*4882a593Smuzhiyun *
1190*4882a593Smuzhiyun * Description:
1191*4882a593Smuzhiyun * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1192*4882a593Smuzhiyun * scanned) to some maximum lun until a LUN is found with no device
1193*4882a593Smuzhiyun * attached. Use the bflags to figure out any oddities.
1196*4882a593Smuzhiyun **/
1197*4882a593Smuzhiyun static void scsi_sequential_lun_scan(struct scsi_target *starget,
1198*4882a593Smuzhiyun blist_flags_t bflags, int scsi_level,
1199*4882a593Smuzhiyun enum scsi_scan_mode rescan)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun uint max_dev_lun;
1202*4882a593Smuzhiyun u64 sparse_lun, lun;
1203*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1206*4882a593Smuzhiyun "scsi scan: Sequential scan\n"));
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun max_dev_lun = min(max_scsi_luns, shost->max_lun);
1209*4882a593Smuzhiyun /*
1210*4882a593Smuzhiyun * If this device is known to support sparse multiple units,
1211*4882a593Smuzhiyun * override the other settings, and scan all of them. Normally,
1212*4882a593Smuzhiyun * SCSI-3 devices should be scanned via the REPORT LUNS.
1213*4882a593Smuzhiyun */
1214*4882a593Smuzhiyun if (bflags & BLIST_SPARSELUN) {
1215*4882a593Smuzhiyun max_dev_lun = shost->max_lun;
1216*4882a593Smuzhiyun sparse_lun = 1;
1217*4882a593Smuzhiyun } else
1218*4882a593Smuzhiyun sparse_lun = 0;
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun /*
1221*4882a593Smuzhiyun * If less than SCSI_1_CCS, and no special lun scanning, stop
1222*4882a593Smuzhiyun * scanning; this matches 2.4 behaviour, but could just be a bug
1223*4882a593Smuzhiyun * (to continue scanning a SCSI_1_CCS device).
1224*4882a593Smuzhiyun *
1225*4882a593Smuzhiyun * This test is broken. We might not have any device on lun0 for
1226*4882a593Smuzhiyun * a sparselun device, and if that's the case then how would we
1227*4882a593Smuzhiyun * know the real scsi_level, eh? It might make sense to just not
1228*4882a593Smuzhiyun * scan any SCSI_1 device for non-0 luns, but that check would best
1229*4882a593Smuzhiyun * go into scsi_alloc_sdev() and just have it return null when asked
1230*4882a593Smuzhiyun * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1231*4882a593Smuzhiyun *
1232*4882a593Smuzhiyun if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1233*4882a593Smuzhiyun ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1234*4882a593Smuzhiyun == 0))
1235*4882a593Smuzhiyun return;
1236*4882a593Smuzhiyun */
1237*4882a593Smuzhiyun /*
1238*4882a593Smuzhiyun * If this device is known to support multiple units, override
1239*4882a593Smuzhiyun * the other settings, and scan all of them.
1240*4882a593Smuzhiyun */
1241*4882a593Smuzhiyun if (bflags & BLIST_FORCELUN)
1242*4882a593Smuzhiyun max_dev_lun = shost->max_lun;
1243*4882a593Smuzhiyun /*
1244*4882a593Smuzhiyun * REGAL CDC-4X: avoid hang after LUN 4
1245*4882a593Smuzhiyun */
1246*4882a593Smuzhiyun if (bflags & BLIST_MAX5LUN)
1247*4882a593Smuzhiyun max_dev_lun = min(5U, max_dev_lun);
1248*4882a593Smuzhiyun /*
1249*4882a593Smuzhiyun * Do not scan SCSI-2 or lower device past LUN 7, unless
1250*4882a593Smuzhiyun * BLIST_LARGELUN.
1251*4882a593Smuzhiyun */
1252*4882a593Smuzhiyun if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1253*4882a593Smuzhiyun max_dev_lun = min(8U, max_dev_lun);
1254*4882a593Smuzhiyun else
1255*4882a593Smuzhiyun max_dev_lun = min(256U, max_dev_lun);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun /*
1258*4882a593Smuzhiyun * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1259*4882a593Smuzhiyun * until we reach the max, or no LUN is found and we are not
1260*4882a593Smuzhiyun * sparse_lun.
1261*4882a593Smuzhiyun */
1262*4882a593Smuzhiyun for (lun = 1; lun < max_dev_lun; ++lun)
1263*4882a593Smuzhiyun if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1264*4882a593Smuzhiyun NULL) != SCSI_SCAN_LUN_PRESENT) &&
1265*4882a593Smuzhiyun !sparse_lun)
1266*4882a593Smuzhiyun return;
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /**
1270*4882a593Smuzhiyun * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1271*4882a593Smuzhiyun * @starget: which target
1272*4882a593Smuzhiyun * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1273*4882a593Smuzhiyun * @rescan: scan mode; anything but SCSI_SCAN_INITIAL skips code only needed on the first scan
1274*4882a593Smuzhiyun *
1275*4882a593Smuzhiyun * Description:
1276*4882a593Smuzhiyun * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1277*4882a593Smuzhiyun * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1278*4882a593Smuzhiyun *
1279*4882a593Smuzhiyun * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1280*4882a593Smuzhiyun * LUNs even if it's older than SCSI-3.
1281*4882a593Smuzhiyun * If BLIST_NOREPORTLUN is set, return 1 always.
1282*4882a593Smuzhiyun * If BLIST_NOLUN is set, return 0 always.
1283*4882a593Smuzhiyun * If starget->no_report_luns is set, return 1 always.
1284*4882a593Smuzhiyun *
1285*4882a593Smuzhiyun * Return:
1286*4882a593Smuzhiyun * 0: scan completed (or no memory, so further scanning is futile)
1287*4882a593Smuzhiyun * 1: could not scan with REPORT LUN
1288*4882a593Smuzhiyun **/
1289*4882a593Smuzhiyun static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1290*4882a593Smuzhiyun enum scsi_scan_mode rescan)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1293*4882a593Smuzhiyun unsigned int length;
1294*4882a593Smuzhiyun u64 lun;
1295*4882a593Smuzhiyun unsigned int num_luns;
1296*4882a593Smuzhiyun unsigned int retries;
1297*4882a593Smuzhiyun int result;
1298*4882a593Smuzhiyun struct scsi_lun *lunp, *lun_data;
1299*4882a593Smuzhiyun struct scsi_sense_hdr sshdr;
1300*4882a593Smuzhiyun struct scsi_device *sdev;
1301*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1302*4882a593Smuzhiyun int ret = 0;
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun /*
1305*4882a593Smuzhiyun * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1306*4882a593Smuzhiyun * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and the host adapter
1307*4882a593Smuzhiyun * supports more than 8 LUNs.
1308*4882a593Smuzhiyun * Don't attempt if the target doesn't support REPORT LUNS.
1309*4882a593Smuzhiyun */
1310*4882a593Smuzhiyun if (bflags & BLIST_NOREPORTLUN)
1311*4882a593Smuzhiyun return 1;
1312*4882a593Smuzhiyun if (starget->scsi_level < SCSI_2 &&
1313*4882a593Smuzhiyun starget->scsi_level != SCSI_UNKNOWN)
1314*4882a593Smuzhiyun return 1;
1315*4882a593Smuzhiyun if (starget->scsi_level < SCSI_3 &&
1316*4882a593Smuzhiyun (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1317*4882a593Smuzhiyun return 1;
1318*4882a593Smuzhiyun if (bflags & BLIST_NOLUN)
1319*4882a593Smuzhiyun return 0;
1320*4882a593Smuzhiyun if (starget->no_report_luns)
1321*4882a593Smuzhiyun return 1;
1322*4882a593Smuzhiyun
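/*
 * REPORT LUNS is sent through a LUN 0 scsi_device: reuse an
 * existing one, or allocate a temporary sdev that is torn down
 * at "out:" below if it never shows up in the returned list.
 */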
1323*4882a593Smuzhiyun if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1324*4882a593Smuzhiyun sdev = scsi_alloc_sdev(starget, 0, NULL);
1325*4882a593Smuzhiyun if (!sdev)
1326*4882a593Smuzhiyun return 0;
1327*4882a593Smuzhiyun if (scsi_device_get(sdev)) {
1328*4882a593Smuzhiyun __scsi_remove_device(sdev);
1329*4882a593Smuzhiyun return 0;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun /*
1334*4882a593Smuzhiyun * Allocate enough to hold the header (the same size as one scsi_lun)
1335*4882a593Smuzhiyun * plus the number of luns we are requesting. 511 was the default
1336*4882a593Smuzhiyun * value of the now removed max_report_luns parameter.
1337*4882a593Smuzhiyun */
1338*4882a593Smuzhiyun length = (511 + 1) * sizeof(struct scsi_lun);
1339*4882a593Smuzhiyun retry:
1340*4882a593Smuzhiyun lun_data = kmalloc(length, GFP_KERNEL |
1341*4882a593Smuzhiyun (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1342*4882a593Smuzhiyun if (!lun_data) {
1343*4882a593Smuzhiyun printk(ALLOC_FAILURE_MSG, __func__);
1344*4882a593Smuzhiyun goto out;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun
1347*4882a593Smuzhiyun scsi_cmd[0] = REPORT_LUNS;
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun /*
1350*4882a593Smuzhiyun * bytes 1 - 5: reserved, set to zero.
1351*4882a593Smuzhiyun */
1352*4882a593Smuzhiyun memset(&scsi_cmd[1], 0, 5);
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun /*
1355*4882a593Smuzhiyun * bytes 6 - 9: length of the command.
1356*4882a593Smuzhiyun */
1357*4882a593Smuzhiyun put_unaligned_be32(length, &scsi_cmd[6]);
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun scsi_cmd[10] = 0; /* reserved */
1360*4882a593Smuzhiyun scsi_cmd[11] = 0; /* control */
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun /*
1363*4882a593Smuzhiyun * We can get a UNIT ATTENTION, for example a power on/reset, so
1364*4882a593Smuzhiyun * retry a few times (like sd.c does for TEST UNIT READY).
1365*4882a593Smuzhiyun * Experience shows some combinations of adapter/devices get at
1366*4882a593Smuzhiyun * least two power on/resets.
1367*4882a593Smuzhiyun *
1368*4882a593Smuzhiyun * Illegal requests (for devices that do not support REPORT LUNS)
1369*4882a593Smuzhiyun * should come through as a check condition, and will not generate
1370*4882a593Smuzhiyun * a retry.
1371*4882a593Smuzhiyun */
1372*4882a593Smuzhiyun for (retries = 0; retries < 3; retries++) {
1373*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1374*4882a593Smuzhiyun "scsi scan: Sending REPORT LUNS to (try %d)\n",
1375*4882a593Smuzhiyun retries));
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1378*4882a593Smuzhiyun lun_data, length, &sshdr,
1379*4882a593Smuzhiyun SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1382*4882a593Smuzhiyun "scsi scan: REPORT LUNS"
1383*4882a593Smuzhiyun " %s (try %d) result 0x%x\n",
1384*4882a593Smuzhiyun result ? "failed" : "successful",
1385*4882a593Smuzhiyun retries, result));
1386*4882a593Smuzhiyun if (result == 0)
1387*4882a593Smuzhiyun break;
1388*4882a593Smuzhiyun else if (scsi_sense_valid(&sshdr)) {
1389*4882a593Smuzhiyun if (sshdr.sense_key != UNIT_ATTENTION)
1390*4882a593Smuzhiyun break;
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun if (result) {
1395*4882a593Smuzhiyun /*
1396*4882a593Smuzhiyun * The device probably does not support a REPORT LUN command
1397*4882a593Smuzhiyun */
1398*4882a593Smuzhiyun ret = 1;
1399*4882a593Smuzhiyun goto out_err;
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun /*
1403*4882a593Smuzhiyun * Get the length from the first four bytes of lun_data.
1404*4882a593Smuzhiyun */
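/*
 * The LUN LIST LENGTH field counts only the LUN entries, not the
 * 8-byte header.  If the target has more LUNs than the buffer can
 * hold, reallocate one large enough and resend the command.
 */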
1405*4882a593Smuzhiyun if (get_unaligned_be32(lun_data->scsi_lun) +
1406*4882a593Smuzhiyun sizeof(struct scsi_lun) > length) {
1407*4882a593Smuzhiyun length = get_unaligned_be32(lun_data->scsi_lun) +
1408*4882a593Smuzhiyun sizeof(struct scsi_lun);
1409*4882a593Smuzhiyun kfree(lun_data);
1410*4882a593Smuzhiyun goto retry;
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun length = get_unaligned_be32(lun_data->scsi_lun);
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun num_luns = (length / sizeof(struct scsi_lun));
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1417*4882a593Smuzhiyun "scsi scan: REPORT LUN scan\n"));
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun /*
1420*4882a593Smuzhiyun * Scan the luns in lun_data. The entry at offset 0 is really
1421*4882a593Smuzhiyun * the header, so start at 1 and go up to and including num_luns.
1422*4882a593Smuzhiyun */
1423*4882a593Smuzhiyun for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1424*4882a593Smuzhiyun lun = scsilun_to_int(lunp);
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun if (lun > sdev->host->max_lun) {
1427*4882a593Smuzhiyun sdev_printk(KERN_WARNING, sdev,
1428*4882a593Smuzhiyun "lun%llu has a LUN larger than"
1429*4882a593Smuzhiyun " allowed by the host adapter\n", lun);
1430*4882a593Smuzhiyun } else {
1431*4882a593Smuzhiyun int res;
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun res = scsi_probe_and_add_lun(starget,
1434*4882a593Smuzhiyun lun, NULL, NULL, rescan, NULL);
1435*4882a593Smuzhiyun if (res == SCSI_SCAN_NO_RESPONSE) {
1436*4882a593Smuzhiyun /*
1437*4882a593Smuzhiyun * Got some results, but now none, abort.
1438*4882a593Smuzhiyun */
1439*4882a593Smuzhiyun sdev_printk(KERN_ERR, sdev,
1440*4882a593Smuzhiyun "Unexpected response"
1441*4882a593Smuzhiyun " from lun %llu while scanning, scan"
1442*4882a593Smuzhiyun " aborted\n", (unsigned long long)lun);
1443*4882a593Smuzhiyun break;
1444*4882a593Smuzhiyun }
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun out_err:
1449*4882a593Smuzhiyun kfree(lun_data);
1450*4882a593Smuzhiyun out:
1451*4882a593Smuzhiyun if (scsi_device_created(sdev))
1452*4882a593Smuzhiyun /*
1453*4882a593Smuzhiyun * the sdev we used didn't appear in the report luns scan
1454*4882a593Smuzhiyun */
1455*4882a593Smuzhiyun __scsi_remove_device(sdev);
1456*4882a593Smuzhiyun scsi_device_put(sdev);
1457*4882a593Smuzhiyun return ret;
1458*4882a593Smuzhiyun }
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1461*4882a593Smuzhiyun uint id, u64 lun, void *hostdata)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun struct scsi_device *sdev = ERR_PTR(-ENODEV);
1464*4882a593Smuzhiyun struct device *parent = &shost->shost_gendev;
1465*4882a593Smuzhiyun struct scsi_target *starget;
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun if (strncmp(scsi_scan_type, "none", 4) == 0)
1468*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun starget = scsi_alloc_target(parent, channel, id);
1471*4882a593Smuzhiyun if (!starget)
1472*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
1473*4882a593Smuzhiyun scsi_autopm_get_target(starget);
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun mutex_lock(&shost->scan_mutex);
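/*
 * Unless this host is itself in the middle of an asynchronous
 * scan, wait for outstanding async scans to finish before adding
 * the device, so discovery ordering stays stable.
 */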
1476*4882a593Smuzhiyun if (!shost->async_scan)
1477*4882a593Smuzhiyun scsi_complete_async_scans();
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1480*4882a593Smuzhiyun scsi_probe_and_add_lun(starget, lun, NULL, &sdev, SCSI_SCAN_RESCAN, hostdata);
1481*4882a593Smuzhiyun scsi_autopm_put_host(shost);
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1484*4882a593Smuzhiyun scsi_autopm_put_target(starget);
1485*4882a593Smuzhiyun /*
1486*4882a593Smuzhiyun * paired with scsi_alloc_target(). Target will be destroyed unless
1487*4882a593Smuzhiyun * scsi_probe_and_add_lun made an underlying device visible
1488*4882a593Smuzhiyun */
1489*4882a593Smuzhiyun scsi_target_reap(starget);
1490*4882a593Smuzhiyun put_device(&starget->dev);
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun return sdev;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun EXPORT_SYMBOL(__scsi_add_device);
1495*4882a593Smuzhiyun
1496*4882a593Smuzhiyun int scsi_add_device(struct Scsi_Host *host, uint channel,
1497*4882a593Smuzhiyun uint target, u64 lun)
1498*4882a593Smuzhiyun {
1499*4882a593Smuzhiyun struct scsi_device *sdev =
1500*4882a593Smuzhiyun __scsi_add_device(host, channel, target, lun, NULL);
1501*4882a593Smuzhiyun if (IS_ERR(sdev))
1502*4882a593Smuzhiyun return PTR_ERR(sdev);
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun scsi_device_put(sdev);
1505*4882a593Smuzhiyun return 0;
1506*4882a593Smuzhiyun }
1507*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_add_device);
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun void scsi_rescan_device(struct device *dev)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun struct scsi_device *sdev = to_scsi_device(dev);
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun device_lock(dev);
1514*4882a593Smuzhiyun
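/*
 * Re-read the cached VPD pages, then give the device handler and
 * the upper-level driver (sd, sr, ...) a chance to re-read
 * capacity and other parameters.
 */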
1515*4882a593Smuzhiyun scsi_attach_vpd(sdev);
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun if (sdev->handler && sdev->handler->rescan)
1518*4882a593Smuzhiyun sdev->handler->rescan(sdev);
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun if (dev->driver && try_module_get(dev->driver->owner)) {
1521*4882a593Smuzhiyun struct scsi_driver *drv = to_scsi_driver(dev->driver);
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun if (drv->rescan)
1524*4882a593Smuzhiyun drv->rescan(dev);
1525*4882a593Smuzhiyun module_put(dev->driver->owner);
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun device_unlock(dev);
1528*4882a593Smuzhiyun }
1529*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_rescan_device);
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun static void __scsi_scan_target(struct device *parent, unsigned int channel,
1532*4882a593Smuzhiyun unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1533*4882a593Smuzhiyun {
1534*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(parent);
1535*4882a593Smuzhiyun blist_flags_t bflags = 0;
1536*4882a593Smuzhiyun int res;
1537*4882a593Smuzhiyun struct scsi_target *starget;
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun if (shost->this_id == id)
1540*4882a593Smuzhiyun /*
1541*4882a593Smuzhiyun * Don't scan the host adapter
1542*4882a593Smuzhiyun */
1543*4882a593Smuzhiyun return;
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun starget = scsi_alloc_target(parent, channel, id);
1546*4882a593Smuzhiyun if (!starget)
1547*4882a593Smuzhiyun return;
1548*4882a593Smuzhiyun scsi_autopm_get_target(starget);
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun if (lun != SCAN_WILD_CARD) {
1551*4882a593Smuzhiyun /*
1552*4882a593Smuzhiyun * Scan for a specific host/chan/id/lun.
1553*4882a593Smuzhiyun */
1554*4882a593Smuzhiyun scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1555*4882a593Smuzhiyun goto out_reap;
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun /*
1559*4882a593Smuzhiyun * Scan LUN 0, if there is some response, scan further. Ideally, we
1560*4882a593Smuzhiyun * would not configure LUN 0 until all LUNs are scanned.
1561*4882a593Smuzhiyun */
1562*4882a593Smuzhiyun res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1563*4882a593Smuzhiyun if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1564*4882a593Smuzhiyun if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1565*4882a593Smuzhiyun /*
1566*4882a593Smuzhiyun * The REPORT LUN did not scan the target,
1567*4882a593Smuzhiyun * do a sequential scan.
1568*4882a593Smuzhiyun */
1569*4882a593Smuzhiyun scsi_sequential_lun_scan(starget, bflags,
1570*4882a593Smuzhiyun starget->scsi_level, rescan);
1571*4882a593Smuzhiyun }
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun out_reap:
1574*4882a593Smuzhiyun scsi_autopm_put_target(starget);
1575*4882a593Smuzhiyun /*
1576*4882a593Smuzhiyun * paired with scsi_alloc_target(): determine if the target has
1577*4882a593Smuzhiyun * any children at all and if not, nuke it
1578*4882a593Smuzhiyun */
1579*4882a593Smuzhiyun scsi_target_reap(starget);
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun put_device(&starget->dev);
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun /**
1585*4882a593Smuzhiyun * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1586*4882a593Smuzhiyun * @parent: host to scan
1587*4882a593Smuzhiyun * @channel: channel to scan
1588*4882a593Smuzhiyun * @id: target id to scan
1589*4882a593Smuzhiyun * @lun: Specific LUN to scan or SCAN_WILD_CARD
1590*4882a593Smuzhiyun * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1591*4882a593Smuzhiyun * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1592*4882a593Smuzhiyun * and SCSI_SCAN_MANUAL to force scanning even if
1593*4882a593Smuzhiyun * 'scan=manual' is set.
1594*4882a593Smuzhiyun *
1595*4882a593Smuzhiyun * Description:
1596*4882a593Smuzhiyun * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1597*4882a593Smuzhiyun * and possibly all LUNs on the target id.
1598*4882a593Smuzhiyun *
1599*4882a593Smuzhiyun * First try a REPORT LUN scan, if that does not scan the target, do a
1600*4882a593Smuzhiyun * sequential scan of LUNs on the target id.
1601*4882a593Smuzhiyun **/
1602*4882a593Smuzhiyun void scsi_scan_target(struct device *parent, unsigned int channel,
1603*4882a593Smuzhiyun unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(parent);
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun if (strncmp(scsi_scan_type, "none", 4) == 0)
1608*4882a593Smuzhiyun return;
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun if (rescan != SCSI_SCAN_MANUAL &&
1611*4882a593Smuzhiyun strncmp(scsi_scan_type, "manual", 6) == 0)
1612*4882a593Smuzhiyun return;
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun mutex_lock(&shost->scan_mutex);
1615*4882a593Smuzhiyun if (!shost->async_scan)
1616*4882a593Smuzhiyun scsi_complete_async_scans();
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1619*4882a593Smuzhiyun __scsi_scan_target(parent, channel, id, lun, rescan);
1620*4882a593Smuzhiyun scsi_autopm_put_host(shost);
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1623*4882a593Smuzhiyun }
1624*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_scan_target);
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1627*4882a593Smuzhiyun unsigned int id, u64 lun,
1628*4882a593Smuzhiyun enum scsi_scan_mode rescan)
1629*4882a593Smuzhiyun {
1630*4882a593Smuzhiyun uint order_id;
1631*4882a593Smuzhiyun
1632*4882a593Smuzhiyun if (id == SCAN_WILD_CARD)
1633*4882a593Smuzhiyun for (id = 0; id < shost->max_id; ++id) {
1634*4882a593Smuzhiyun /*
1635*4882a593Smuzhiyun * XXX adapter drivers when possible (FCP, iSCSI)
1636*4882a593Smuzhiyun * could modify max_id to match the current max,
1637*4882a593Smuzhiyun * not the absolute max.
1638*4882a593Smuzhiyun *
1639*4882a593Smuzhiyun * XXX add a shost id iterator, so for example,
1640*4882a593Smuzhiyun * the FC ID can be the same as a target id
1641*4882a593Smuzhiyun * without a huge overhead of sparse id's.
1642*4882a593Smuzhiyun */
1643*4882a593Smuzhiyun if (shost->reverse_ordering)
1644*4882a593Smuzhiyun /*
1645*4882a593Smuzhiyun * Scan from high to low id.
1646*4882a593Smuzhiyun */
1647*4882a593Smuzhiyun order_id = shost->max_id - id - 1;
1648*4882a593Smuzhiyun else
1649*4882a593Smuzhiyun order_id = id;
1650*4882a593Smuzhiyun __scsi_scan_target(&shost->shost_gendev, channel,
1651*4882a593Smuzhiyun order_id, lun, rescan);
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun else
1654*4882a593Smuzhiyun __scsi_scan_target(&shost->shost_gendev, channel,
1655*4882a593Smuzhiyun id, lun, rescan);
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1659*4882a593Smuzhiyun unsigned int id, u64 lun,
1660*4882a593Smuzhiyun enum scsi_scan_mode rescan)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1663*4882a593Smuzhiyun "%s: <%u:%u:%llu>\n",
1664*4882a593Smuzhiyun __func__, channel, id, lun));
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1667*4882a593Smuzhiyun ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1668*4882a593Smuzhiyun ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1669*4882a593Smuzhiyun return -EINVAL;
1670*4882a593Smuzhiyun
1671*4882a593Smuzhiyun mutex_lock(&shost->scan_mutex);
1672*4882a593Smuzhiyun if (!shost->async_scan)
1673*4882a593Smuzhiyun scsi_complete_async_scans();
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1676*4882a593Smuzhiyun if (channel == SCAN_WILD_CARD)
1677*4882a593Smuzhiyun for (channel = 0; channel <= shost->max_channel;
1678*4882a593Smuzhiyun channel++)
1679*4882a593Smuzhiyun scsi_scan_channel(shost, channel, id, lun,
1680*4882a593Smuzhiyun rescan);
1681*4882a593Smuzhiyun else
1682*4882a593Smuzhiyun scsi_scan_channel(shost, channel, id, lun, rescan);
1683*4882a593Smuzhiyun scsi_autopm_put_host(shost);
1684*4882a593Smuzhiyun }
1685*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1686*4882a593Smuzhiyun
1687*4882a593Smuzhiyun return 0;
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1691*4882a593Smuzhiyun {
1692*4882a593Smuzhiyun struct scsi_device *sdev;
1693*4882a593Smuzhiyun shost_for_each_device(sdev, shost) {
1694*4882a593Smuzhiyun /* target removed before the device could be added */
1695*4882a593Smuzhiyun if (sdev->sdev_state == SDEV_DEL)
1696*4882a593Smuzhiyun continue;
1697*4882a593Smuzhiyun /* If device is already visible, skip adding it to sysfs */
1698*4882a593Smuzhiyun if (sdev->is_visible)
1699*4882a593Smuzhiyun continue;
1700*4882a593Smuzhiyun if (!scsi_host_scan_allowed(shost) ||
1701*4882a593Smuzhiyun scsi_sysfs_add_sdev(sdev) != 0)
1702*4882a593Smuzhiyun __scsi_remove_device(sdev);
1703*4882a593Smuzhiyun }
1704*4882a593Smuzhiyun }
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun /**
1707*4882a593Smuzhiyun * scsi_prep_async_scan - prepare for an async scan
1708*4882a593Smuzhiyun * @shost: the host which will be scanned
1709*4882a593Smuzhiyun * Returns: a cookie to be passed to scsi_finish_async_scan()
1710*4882a593Smuzhiyun *
1711*4882a593Smuzhiyun * Tells the midlayer this host is going to do an asynchronous scan.
1712*4882a593Smuzhiyun * It reserves the host's position in the scanning list and ensures
1713*4882a593Smuzhiyun * that other asynchronous scans started after this one won't affect the
1714*4882a593Smuzhiyun * ordering of the discovered devices.
1715*4882a593Smuzhiyun */
1716*4882a593Smuzhiyun static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1717*4882a593Smuzhiyun {
1718*4882a593Smuzhiyun struct async_scan_data *data = NULL;
1719*4882a593Smuzhiyun unsigned long flags;
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun if (strncmp(scsi_scan_type, "sync", 4) == 0)
1722*4882a593Smuzhiyun return NULL;
1723*4882a593Smuzhiyun
1724*4882a593Smuzhiyun mutex_lock(&shost->scan_mutex);
1725*4882a593Smuzhiyun if (shost->async_scan) {
1726*4882a593Smuzhiyun shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1727*4882a593Smuzhiyun goto err;
1728*4882a593Smuzhiyun }
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun data = kmalloc(sizeof(*data), GFP_KERNEL);
1731*4882a593Smuzhiyun if (!data)
1732*4882a593Smuzhiyun goto err;
1733*4882a593Smuzhiyun data->shost = scsi_host_get(shost);
1734*4882a593Smuzhiyun if (!data->shost)
1735*4882a593Smuzhiyun goto err;
1736*4882a593Smuzhiyun init_completion(&data->prev_finished);
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun spin_lock_irqsave(shost->host_lock, flags);
1739*4882a593Smuzhiyun shost->async_scan = 1;
1740*4882a593Smuzhiyun spin_unlock_irqrestore(shost->host_lock, flags);
1741*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1742*4882a593Smuzhiyun
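/*
 * Queue this scan behind any scans already in progress; if the
 * list is empty there is nothing to wait for, so complete
 * prev_finished immediately.
 */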
1743*4882a593Smuzhiyun spin_lock(&async_scan_lock);
1744*4882a593Smuzhiyun if (list_empty(&scanning_hosts))
1745*4882a593Smuzhiyun complete(&data->prev_finished);
1746*4882a593Smuzhiyun list_add_tail(&data->list, &scanning_hosts);
1747*4882a593Smuzhiyun spin_unlock(&async_scan_lock);
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun return data;
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun err:
1752*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1753*4882a593Smuzhiyun kfree(data);
1754*4882a593Smuzhiyun return NULL;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun
1757*4882a593Smuzhiyun /**
1758*4882a593Smuzhiyun * scsi_finish_async_scan - asynchronous scan has finished
1759*4882a593Smuzhiyun * @data: cookie returned from earlier call to scsi_prep_async_scan()
1760*4882a593Smuzhiyun *
1761*4882a593Smuzhiyun * All the devices currently attached to this host have been found.
1762*4882a593Smuzhiyun * This function announces all the devices it has found to the rest
1763*4882a593Smuzhiyun * of the system.
1764*4882a593Smuzhiyun */
1765*4882a593Smuzhiyun static void scsi_finish_async_scan(struct async_scan_data *data)
1766*4882a593Smuzhiyun {
1767*4882a593Smuzhiyun struct Scsi_Host *shost;
1768*4882a593Smuzhiyun unsigned long flags;
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun if (!data)
1771*4882a593Smuzhiyun return;
1772*4882a593Smuzhiyun
1773*4882a593Smuzhiyun shost = data->shost;
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun mutex_lock(&shost->scan_mutex);
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun if (!shost->async_scan) {
1778*4882a593Smuzhiyun shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1779*4882a593Smuzhiyun dump_stack();
1780*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1781*4882a593Smuzhiyun return;
1782*4882a593Smuzhiyun }
1783*4882a593Smuzhiyun
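/*
 * Wait until every async scan started before this one has
 * announced its devices, keeping discovery order stable across
 * hosts.
 */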
1784*4882a593Smuzhiyun wait_for_completion(&data->prev_finished);
1785*4882a593Smuzhiyun
1786*4882a593Smuzhiyun scsi_sysfs_add_devices(shost);
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun spin_lock_irqsave(shost->host_lock, flags);
1789*4882a593Smuzhiyun shost->async_scan = 0;
1790*4882a593Smuzhiyun spin_unlock_irqrestore(shost->host_lock, flags);
1791*4882a593Smuzhiyun
1792*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun spin_lock(&async_scan_lock);
1795*4882a593Smuzhiyun list_del(&data->list);
1796*4882a593Smuzhiyun if (!list_empty(&scanning_hosts)) {
1797*4882a593Smuzhiyun struct async_scan_data *next = list_entry(scanning_hosts.next,
1798*4882a593Smuzhiyun struct async_scan_data, list);
1799*4882a593Smuzhiyun complete(&next->prev_finished);
1800*4882a593Smuzhiyun }
1801*4882a593Smuzhiyun spin_unlock(&async_scan_lock);
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun scsi_autopm_put_host(shost);
1804*4882a593Smuzhiyun scsi_host_put(shost);
1805*4882a593Smuzhiyun kfree(data);
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun static void do_scsi_scan_host(struct Scsi_Host *shost)
1809*4882a593Smuzhiyun {
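/*
 * LLDs that implement their own discovery provide scan_start()
 * and scan_finished(); poll until they report completion.
 * Otherwise fall back to a generic wildcard scan of every
 * channel, id and lun.
 */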
1810*4882a593Smuzhiyun if (shost->hostt->scan_finished) {
1811*4882a593Smuzhiyun unsigned long start = jiffies;
1812*4882a593Smuzhiyun if (shost->hostt->scan_start)
1813*4882a593Smuzhiyun shost->hostt->scan_start(shost);
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun while (!shost->hostt->scan_finished(shost, jiffies - start))
1816*4882a593Smuzhiyun msleep(10);
1817*4882a593Smuzhiyun } else {
1818*4882a593Smuzhiyun scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1819*4882a593Smuzhiyun SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
1820*4882a593Smuzhiyun }
1821*4882a593Smuzhiyun }
1822*4882a593Smuzhiyun
1823*4882a593Smuzhiyun static void do_scan_async(void *_data, async_cookie_t c)
1824*4882a593Smuzhiyun {
1825*4882a593Smuzhiyun struct async_scan_data *data = _data;
1826*4882a593Smuzhiyun struct Scsi_Host *shost = data->shost;
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun do_scsi_scan_host(shost);
1829*4882a593Smuzhiyun scsi_finish_async_scan(data);
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun
1832*4882a593Smuzhiyun /**
1833*4882a593Smuzhiyun * scsi_scan_host - scan the given adapter
1834*4882a593Smuzhiyun * @shost: adapter to scan
1835*4882a593Smuzhiyun **/
1836*4882a593Smuzhiyun void scsi_scan_host(struct Scsi_Host *shost)
1837*4882a593Smuzhiyun {
1838*4882a593Smuzhiyun struct async_scan_data *data;
1839*4882a593Smuzhiyun
1840*4882a593Smuzhiyun if (strncmp(scsi_scan_type, "none", 4) == 0 ||
1841*4882a593Smuzhiyun strncmp(scsi_scan_type, "manual", 6) == 0)
1842*4882a593Smuzhiyun return;
1843*4882a593Smuzhiyun if (scsi_autopm_get_host(shost) < 0)
1844*4882a593Smuzhiyun return;
1845*4882a593Smuzhiyun
1846*4882a593Smuzhiyun data = scsi_prep_async_scan(shost);
1847*4882a593Smuzhiyun if (!data) {
1848*4882a593Smuzhiyun do_scsi_scan_host(shost);
1849*4882a593Smuzhiyun scsi_autopm_put_host(shost);
1850*4882a593Smuzhiyun return;
1851*4882a593Smuzhiyun }
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun /* register with the async subsystem so wait_for_device_probe()
1854*4882a593Smuzhiyun * will flush this work
1855*4882a593Smuzhiyun */
1856*4882a593Smuzhiyun async_schedule(do_scan_async, data);
1857*4882a593Smuzhiyun
1858*4882a593Smuzhiyun /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_scan_host);
1861*4882a593Smuzhiyun
1862*4882a593Smuzhiyun void scsi_forget_host(struct Scsi_Host *shost)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun struct scsi_device *sdev;
1865*4882a593Smuzhiyun unsigned long flags;
1866*4882a593Smuzhiyun
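/*
 * __scsi_remove_device() can sleep and modifies the host's device
 * list, so drop host_lock around each removal and restart the
 * list walk afterwards.
 */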
1867*4882a593Smuzhiyun restart:
1868*4882a593Smuzhiyun spin_lock_irqsave(shost->host_lock, flags);
1869*4882a593Smuzhiyun list_for_each_entry(sdev, &shost->__devices, siblings) {
1870*4882a593Smuzhiyun if (sdev->sdev_state == SDEV_DEL)
1871*4882a593Smuzhiyun continue;
1872*4882a593Smuzhiyun spin_unlock_irqrestore(shost->host_lock, flags);
1873*4882a593Smuzhiyun __scsi_remove_device(sdev);
1874*4882a593Smuzhiyun goto restart;
1875*4882a593Smuzhiyun }
1876*4882a593Smuzhiyun spin_unlock_irqrestore(shost->host_lock, flags);
1877*4882a593Smuzhiyun }
1878*4882a593Smuzhiyun
1879*4882a593Smuzhiyun /**
1880*4882a593Smuzhiyun * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
1881*4882a593Smuzhiyun * @shost: Host that needs a scsi_device
1882*4882a593Smuzhiyun *
1883*4882a593Smuzhiyun * Lock status: None assumed.
1884*4882a593Smuzhiyun *
1885*4882a593Smuzhiyun * Returns: The scsi_device or NULL
1886*4882a593Smuzhiyun *
1887*4882a593Smuzhiyun * Notes:
1888*4882a593Smuzhiyun * Attach a single scsi_device to the Scsi_Host - this should
1889*4882a593Smuzhiyun * be made to look like a "pseudo-device" that points to the
1890*4882a593Smuzhiyun * HA itself.
1891*4882a593Smuzhiyun *
1892*4882a593Smuzhiyun * Note - this device is not accessible from any high-level
1893*4882a593Smuzhiyun * drivers (including generics), which is probably not
1894*4882a593Smuzhiyun * optimal. We can add hooks later to attach.
1895*4882a593Smuzhiyun */
1896*4882a593Smuzhiyun struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun struct scsi_device *sdev = NULL;
1899*4882a593Smuzhiyun struct scsi_target *starget;
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun mutex_lock(&shost->scan_mutex);
1902*4882a593Smuzhiyun if (!scsi_host_scan_allowed(shost))
1903*4882a593Smuzhiyun goto out;
1904*4882a593Smuzhiyun starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
1905*4882a593Smuzhiyun if (!starget)
1906*4882a593Smuzhiyun goto out;
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun sdev = scsi_alloc_sdev(starget, 0, NULL);
1909*4882a593Smuzhiyun if (sdev)
1910*4882a593Smuzhiyun sdev->borken = 0;
1911*4882a593Smuzhiyun else
1912*4882a593Smuzhiyun scsi_target_reap(starget);
1913*4882a593Smuzhiyun put_device(&starget->dev);
1914*4882a593Smuzhiyun out:
1915*4882a593Smuzhiyun mutex_unlock(&shost->scan_mutex);
1916*4882a593Smuzhiyun return sdev;
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_get_host_dev);
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun /**
1921*4882a593Smuzhiyun * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
1922*4882a593Smuzhiyun * @sdev: Host device to be freed
1923*4882a593Smuzhiyun *
1924*4882a593Smuzhiyun * Lock status: None assumed.
1925*4882a593Smuzhiyun *
1926*4882a593Smuzhiyun * Returns: Nothing
1927*4882a593Smuzhiyun */
1928*4882a593Smuzhiyun void scsi_free_host_dev(struct scsi_device *sdev)
1929*4882a593Smuzhiyun {
1930*4882a593Smuzhiyun BUG_ON(sdev->id != sdev->host->this_id);
1931*4882a593Smuzhiyun
1932*4882a593Smuzhiyun __scsi_remove_device(sdev);
1933*4882a593Smuzhiyun }
1934*4882a593Smuzhiyun EXPORT_SYMBOL(scsi_free_host_dev);
1935*4882a593Smuzhiyun