/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
#undef C

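/* Suspend the device's remote node context for the given reason; the RNC
 * layer is told to expect the software-suspend completion event.
 */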
enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  enum sci_remote_node_suspension_reasons reason)
{
	return sci_remote_node_context_suspend(&idev->rnc, reason,
					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready. We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

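/* Terminate a single request on this device. The request is skipped if it is
 * not active, does not target this device, or (when check_abort is set) is
 * not already marked IREQ_PENDING_ABORT. Otherwise the abort path is marked
 * active on the request and it is handed to the controller for termination.
 */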
static enum sci_status sci_remote_device_terminate_req(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	int check_abort,
	struct isci_request *ireq)
{
	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
	    (ireq->target_device != idev) ||
	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
		return SCI_SUCCESS;

	dev_dbg(&ihost->pdev->dev,
		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
		__func__, idev, idev->flags, ireq, ireq->target_device);

	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);

	return sci_controller_terminate_request(ihost, idev, ireq);
}

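/* Sweep the host's request pool and terminate every request that targets
 * this device, optionally restricting the sweep to requests already marked
 * for abort. The last non-success status encountered is returned.
 */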
static enum sci_status sci_remote_device_terminate_reqs_checkabort(
	struct isci_remote_device *idev,
	int chk)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}
	return status;
}

static bool isci_compare_suspendcount(
	struct isci_remote_device *idev,
	u32 localcount)
{
	smp_rmb();

	/* Check for a change in the suspend count, or the RNC
	 * being destroyed.
	 */
	return (localcount != idev->rnc.suspend_count)
	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
}

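/* Wait-queue condition helpers: under scic_lock, check that the RNC suspend
 * count has changed (or the RNC is being destroyed) and, respectively, that
 * the request's abort path has finished or that the device has no started
 * requests left.
 */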
static bool isci_check_reqterm(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
	    && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

static bool isci_check_devempty(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
	    && idev->started_request_count == 0;
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

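/* Terminate either one request (ireq != NULL) or all requests posted to this
 * device, then wait with a timeout for the termination(s) and the RNC
 * suspension to complete. A device reference is held across the wait.
 */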
enum sci_status isci_remote_device_terminate_requests(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;
	unsigned long flags;
	u32 rnc_suspend_count;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (isci_get_device(idev) == NULL) {
		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
			__func__, idev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		status = SCI_FAILURE;
	} else {
		/* If already suspended, don't wait for another suspension. */
		smp_rmb();
		rnc_suspend_count
			= sci_remote_node_context_is_suspended(&idev->rnc)
				? 0 : idev->rnc.suspend_count;

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p, ireq=%p; started_request_count=%d, "
			"rnc_suspend_count=%d, rnc.suspend_count=%d; "
			"about to wait\n",
			__func__, idev, ireq, idev->started_request_count,
			rnc_suspend_count, idev->rnc.suspend_count);

#define MAX_SUSPEND_MSECS 10000
		if (ireq) {
			/* Terminate a specific TC. */
			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_reqterm(ihost, idev, ireq,
								   rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, rnc.suspend_count=%d "
					"RNC: current state %s, current "
					"suspend_type %x dest state %d;\n"
					"ireq=%p, ireq->flags = %lx\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count, idev->rnc.suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_type,
					idev->rnc.destination_state,
					ireq, ireq->flags);
			}
			spin_lock_irqsave(&ihost->scic_lock, flags);
			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
				isci_free_tag(ihost, ireq->io_tag);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		} else {
			/* Terminate all TCs. */
			sci_remote_device_terminate_requests(idev);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_devempty(ihost, idev,
								    rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, "
					"RNC: current state %s, "
					"rnc.suspend_count=%d, current "
					"suspend_type %x dest state %d\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_count,
					idev->rnc.suspend_type,
					idev->rnc.destination_state);
			}
		}
		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
			__func__, idev);
		isci_put_device(idev);
	}
	return status;
}


/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready. We mark the isci device as not ready for
 *    I/O (the IDEV_IO_READY flag is cleared).
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 * @reason: This parameter specifies the reason the device is not ready.
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 u32 reason)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Suspend the remote device so the I/O can be terminated. */
		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

		/* Kill all outstanding requests for the device. */
		sci_remote_device_terminate_requests(idev);

		fallthrough;	/* into the default case */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/* Called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

enum sci_status sci_remote_device_terminate_requests(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
}

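/* Stop the remote device. Depending on the current state, this either
 * destructs the remote node context right away or terminates outstanding
 * requests first; the device transitions to SCI_DEV_STOPPING.
 */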
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		else {
			sci_remote_device_suspend(
				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
			sci_remote_device_terminate_requests(idev);
		}
		return SCI_SUCCESS;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

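/* Begin a remote device reset: only legal from the ready state and the STP
 * ready substates; transitions the device to SCI_DEV_RESETTING. The reset is
 * finished via sci_remote_device_reset_complete().
 */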
enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

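/* Route an unsolicited frame received for this device. Depending on the
 * device state the frame is handed to the owning I/O request, used to enter
 * the NCQ error substate, or released back to the controller.
 */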
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO request.
			 * Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is
			 * detected. Treat this like an SDB error FIS and use
			 * the same not-ready reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state. All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

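/* Return true if the device is in the ready state or in any of its STP/SMP
 * ready substates.
 */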
static bool is_remote_device_ready(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * Called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to an early D2H FIS).
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

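/* Handle an SCU event for this device. RNC suspend/resume related events are
 * forwarded to the remote node context; an I_T nexus timeout suspends the
 * RNC. In the ATAPI error and STP idle states a suspension event triggers an
 * immediate RNC resume.
 */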
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	enum sci_status status;
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		fallthrough;	/* and treat as unhandled */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	/* Decode device-specific states that may require an RNC resume during
	 * normal operation. When the abort path is active, these resumes are
	 * managed when the abort path exits.
	 */
	if (state == SCI_STP_DEV_ATAPI_ERROR) {
		/* For the ATAPI error state, resume the RNC right away. */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
			return sci_remote_node_context_resume(&idev->rnc,
							      atapi_remote_device_resume_done,
							      idev);
		}
	}

	if (state == SCI_STP_DEV_IDLE) {

		/* We pick up suspension events to handle specifically in this
		 * state. We resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

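/* Start an I/O request on this device: the request is started on the owning
 * port and the remote node context, and the device substate is updated as
 * needed (e.g. STP idle to NCQ or CMD, SMP idle to CMD).
 */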
enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* Attempt to start an io request for this device object. The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* Handle the start io operation for a SATA device that is in
		 * the command idle state:
		 * - Evaluate the type of IO request to be started.
		 * - If it is an NCQ request, change to the NCQ substate.
		 * - If it is any other command, change to the CMD substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device is already handling a command; it cannot accept
		 * new commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

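/* Complete an I/O request for this device and update the device substate,
 * e.g. returning the STP/SMP command substates to idle, entering AWAIT_RESET
 * when a device reset is required, or destructing the RNC when the last
 * request completes while stopping.
 */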
enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
					      struct isci_remote_device *idev,
					      struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_ATAPI_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error and the device
			 * needs a LUN reset. Force the state machine to the
			 * AWAIT_RESET state so that the remaining I/Os reach
			 * the RNC state handler and are completed by the RNC
			 * with a status of "DEVICE_RESET_REQUIRED" instead of
			 * "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

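/* Start a task management request. For the STP substates the device is
 * suspended so the remote node context can be cleaned up before the task is
 * posted, and SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS tells the controller
 * not to post the task context until the RNC has been resumed.
 */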
enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must clean up the TCi to NCQ mapping
		 * table. The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC. In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_device_suspend(idev,
					  SCI_SW_SUSPEND_LINKHANG_DETECT);

		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
							    sci_remote_device_continue_request, idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* We need to let the controller start request handler know that
		 * it can't post TC yet. We will provide a callback function to
		 * post TC when RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		/* Resume the RNC as needed: */
		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
							    NULL, NULL);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

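/* Post a request context to the SCU for this device, encoding the protocol
 * engine group, the logical port index, and the device's remote node index.
 */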
void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* Called once the remote node context has transitioned to a
 * ready state. This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @idev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource. As such, they must be
 * protected. Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant. Returns SCI_SUCCESS if the device is successfully
 * destructed, or SCI_FAILURE_INVALID_STATE if the device is not in the
 * stopped state (e.g. it has already been destroyed or was never stopped).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

1035*4882a593Smuzhiyun /**
1036*4882a593Smuzhiyun * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
1037*4882a593Smuzhiyun * @ihost: This parameter specifies the isci host object.
1038*4882a593Smuzhiyun * @idev: This parameter specifies the remote device to be freed.
1039*4882a593Smuzhiyun *
1040*4882a593Smuzhiyun */
isci_remote_device_deconstruct(struct isci_host * ihost,struct isci_remote_device * idev)1041*4882a593Smuzhiyun static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1044*4882a593Smuzhiyun "%s: isci_device = %p\n", __func__, idev);
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun /* There should not be any outstanding io's. All paths to
1047*4882a593Smuzhiyun * here should go through isci_remote_device_nuke_requests.
1048*4882a593Smuzhiyun * If we hit this condition, we will need a way to complete
1049*4882a593Smuzhiyun * io requests in process */
1050*4882a593Smuzhiyun BUG_ON(idev->started_request_count > 0);
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun sci_remote_device_destruct(idev);
1053*4882a593Smuzhiyun list_del_init(&idev->node);
1054*4882a593Smuzhiyun isci_put_device(idev);
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
sci_remote_device_stopped_state_enter(struct sci_base_state_machine * sm)1057*4882a593Smuzhiyun static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1060*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1061*4882a593Smuzhiyun u32 prev_state;
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun /* If we are entering from the stopping state let the SCI User know that
1064*4882a593Smuzhiyun * the stop operation has completed.
1065*4882a593Smuzhiyun */
1066*4882a593Smuzhiyun prev_state = idev->sm.previous_state_id;
1067*4882a593Smuzhiyun if (prev_state == SCI_DEV_STOPPING)
1068*4882a593Smuzhiyun isci_remote_device_deconstruct(ihost, idev);
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun sci_controller_remote_device_stopped(ihost, idev);
1071*4882a593Smuzhiyun }
1072*4882a593Smuzhiyun
1073*4882a593Smuzhiyun static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
1074*4882a593Smuzhiyun {
1075*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1076*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun isci_remote_device_not_ready(ihost, idev,
1079*4882a593Smuzhiyun SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
1083*4882a593Smuzhiyun {
1084*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1085*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1086*4882a593Smuzhiyun struct domain_device *dev = idev->domain_dev;
1087*4882a593Smuzhiyun
1088*4882a593Smuzhiyun if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
1089*4882a593Smuzhiyun sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
1090*4882a593Smuzhiyun } else if (dev_is_expander(dev->dev_type)) {
1091*4882a593Smuzhiyun sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
1092*4882a593Smuzhiyun } else
1093*4882a593Smuzhiyun isci_remote_device_ready(ihost, idev);
1094*4882a593Smuzhiyun }
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
1097*4882a593Smuzhiyun {
1098*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1099*4882a593Smuzhiyun struct domain_device *dev = idev->domain_dev;
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun if (dev->dev_type == SAS_END_DEVICE) {
1102*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun isci_remote_device_not_ready(ihost, idev,
1105*4882a593Smuzhiyun SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1112*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1115*4882a593Smuzhiyun "%s: isci_device = %p\n", __func__, idev);
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
1121*4882a593Smuzhiyun {
1122*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1123*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1126*4882a593Smuzhiyun "%s: isci_device = %p\n", __func__, idev);
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun idev->working_request = NULL;
1136*4882a593Smuzhiyun if (sci_remote_node_context_is_ready(&idev->rnc)) {
1137*4882a593Smuzhiyun /*
1138*4882a593Smuzhiyun * Since the RNC is ready, it's alright to finish completion
1139*4882a593Smuzhiyun * processing (e.g. signal the remote device is ready). */
1140*4882a593Smuzhiyun sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
1141*4882a593Smuzhiyun } else {
1142*4882a593Smuzhiyun sci_remote_node_context_resume(&idev->rnc,
1143*4882a593Smuzhiyun sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
1144*4882a593Smuzhiyun idev);
1145*4882a593Smuzhiyun }
1146*4882a593Smuzhiyun }
1147*4882a593Smuzhiyun
1148*4882a593Smuzhiyun static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1149*4882a593Smuzhiyun {
1150*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1151*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun BUG_ON(idev->working_request == NULL);
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun isci_remote_device_not_ready(ihost, idev,
1156*4882a593Smuzhiyun SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1162*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
1165*4882a593Smuzhiyun isci_remote_device_not_ready(ihost, idev,
1166*4882a593Smuzhiyun idev->not_ready_reason);
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1170*4882a593Smuzhiyun {
1171*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1172*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun isci_remote_device_ready(ihost, idev);
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1180*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun BUG_ON(idev->working_request == NULL);
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun isci_remote_device_not_ready(ihost, idev,
1185*4882a593Smuzhiyun SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
1186*4882a593Smuzhiyun }
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
1189*4882a593Smuzhiyun {
1190*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun idev->working_request = NULL;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun static const struct sci_base_state sci_remote_device_state_table[] = {
1196*4882a593Smuzhiyun [SCI_DEV_INITIAL] = {
1197*4882a593Smuzhiyun .enter_state = sci_remote_device_initial_state_enter,
1198*4882a593Smuzhiyun },
1199*4882a593Smuzhiyun [SCI_DEV_STOPPED] = {
1200*4882a593Smuzhiyun .enter_state = sci_remote_device_stopped_state_enter,
1201*4882a593Smuzhiyun },
1202*4882a593Smuzhiyun [SCI_DEV_STARTING] = {
1203*4882a593Smuzhiyun .enter_state = sci_remote_device_starting_state_enter,
1204*4882a593Smuzhiyun },
1205*4882a593Smuzhiyun [SCI_DEV_READY] = {
1206*4882a593Smuzhiyun .enter_state = sci_remote_device_ready_state_enter,
1207*4882a593Smuzhiyun .exit_state = sci_remote_device_ready_state_exit
1208*4882a593Smuzhiyun },
1209*4882a593Smuzhiyun [SCI_STP_DEV_IDLE] = {
1210*4882a593Smuzhiyun .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1211*4882a593Smuzhiyun },
1212*4882a593Smuzhiyun [SCI_STP_DEV_CMD] = {
1213*4882a593Smuzhiyun .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1214*4882a593Smuzhiyun },
1215*4882a593Smuzhiyun [SCI_STP_DEV_NCQ] = { },
1216*4882a593Smuzhiyun [SCI_STP_DEV_NCQ_ERROR] = {
1217*4882a593Smuzhiyun .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1218*4882a593Smuzhiyun },
1219*4882a593Smuzhiyun [SCI_STP_DEV_ATAPI_ERROR] = { },
1220*4882a593Smuzhiyun [SCI_STP_DEV_AWAIT_RESET] = { },
1221*4882a593Smuzhiyun [SCI_SMP_DEV_IDLE] = {
1222*4882a593Smuzhiyun .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1223*4882a593Smuzhiyun },
1224*4882a593Smuzhiyun [SCI_SMP_DEV_CMD] = {
1225*4882a593Smuzhiyun .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1226*4882a593Smuzhiyun .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
1227*4882a593Smuzhiyun },
1228*4882a593Smuzhiyun [SCI_DEV_STOPPING] = { },
1229*4882a593Smuzhiyun [SCI_DEV_FAILED] = { },
1230*4882a593Smuzhiyun [SCI_DEV_RESETTING] = {
1231*4882a593Smuzhiyun .enter_state = sci_remote_device_resetting_state_enter,
1232*4882a593Smuzhiyun .exit_state = sci_remote_device_resetting_state_exit
1233*4882a593Smuzhiyun },
1234*4882a593Smuzhiyun [SCI_DEV_FINAL] = { },
1235*4882a593Smuzhiyun };
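
/*
 * How the table above is consumed: sci_change_state() runs the outgoing
 * state's .exit_state hook (when present), records the previous state,
 * and then runs the incoming state's .enter_state hook.  A simplified
 * sketch of that dispatch (member and helper names here are illustrative,
 * see the generic sci_base_state_machine code for the real thing):
 *
 *	static void change_state_sketch(struct sci_base_state_machine *sm,
 *					u32 next)
 *	{
 *		const struct sci_base_state *table = sm->state_table;
 *
 *		if (table[sm->current_state_id].exit_state)
 *			table[sm->current_state_id].exit_state(sm);
 *
 *		sm->previous_state_id = sm->current_state_id;
 *		sm->current_state_id = next;
 *
 *		if (table[next].enter_state)
 *			table[next].enter_state(sm);
 *	}
 */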
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun /**
1238*4882a593Smuzhiyun * sci_remote_device_construct() - common construction
1239*4882a593Smuzhiyun * @iport: SAS/SATA port through which this device is accessed.
1240*4882a593Smuzhiyun * @idev: remote device to construct
1241*4882a593Smuzhiyun *
1242*4882a593Smuzhiyun * This routine just performs benign initialization and does not
1243*4882a593Smuzhiyun * allocate the remote_node_context which is left to
1244*4882a593Smuzhiyun * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
1245*4882a593Smuzhiyun * frees the remote_node_context(s) for the device.
1246*4882a593Smuzhiyun */
1247*4882a593Smuzhiyun static void sci_remote_device_construct(struct isci_port *iport,
1248*4882a593Smuzhiyun struct isci_remote_device *idev)
1249*4882a593Smuzhiyun {
1250*4882a593Smuzhiyun idev->owning_port = iport;
1251*4882a593Smuzhiyun idev->started_request_count = 0;
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun sci_remote_node_context_construct(&idev->rnc,
1256*4882a593Smuzhiyun SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun /**
1260*4882a593Smuzhiyun * sci_remote_device_da_construct() - construct direct attached device.
1261*4882a593Smuzhiyun * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
1262*4882a593Smuzhiyun * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1263*4882a593Smuzhiyun * the device is known to the SCI Core since it is contained in the
1264*4882a593Smuzhiyun * sci_phy object. The remote node context(s) are a global resource
1265*4882a593Smuzhiyun * allocated by this routine and freed by sci_remote_device_destruct().
1266*4882a593Smuzhiyun *
1267*4882a593Smuzhiyun * Returns:
1268*4882a593Smuzhiyun * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1269*4882a593Smuzhiyun * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1270*4882a593Smuzhiyun * sata-only controller instance.
1271*4882a593Smuzhiyun * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1272*4882a593Smuzhiyun */
1273*4882a593Smuzhiyun static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1274*4882a593Smuzhiyun struct isci_remote_device *idev)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun enum sci_status status;
1277*4882a593Smuzhiyun struct sci_port_properties properties;
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun sci_remote_device_construct(iport, idev);
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun sci_port_get_properties(iport, &properties);
1282*4882a593Smuzhiyun /* Get accurate port width from port's phy mask for a DA device. */
1283*4882a593Smuzhiyun idev->device_port_width = hweight32(properties.phy_mask);
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1286*4882a593Smuzhiyun idev,
1287*4882a593Smuzhiyun &idev->rnc.remote_node_index);
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun if (status != SCI_SUCCESS)
1290*4882a593Smuzhiyun return status;
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun return SCI_SUCCESS;
1295*4882a593Smuzhiyun }
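
/*
 * Example of the port-width calculation above (illustrative values): a
 * direct attached device behind phys 0, 2 and 3 yields a phy_mask of 0xd,
 * so hweight32() reports a device_port_width of 3:
 *
 *	u32 phy_mask = 0xd;			// 0b1101, phys 0, 2, 3
 *	int width = hweight32(phy_mask);	// width == 3
 */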
1296*4882a593Smuzhiyun
1297*4882a593Smuzhiyun /**
1298*4882a593Smuzhiyun * sci_remote_device_ea_construct() - construct expander attached device
1299*4882a593Smuzhiyun * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
1300*4882a593Smuzhiyun * The remote node context(s) are a global resource allocated by this
1301*4882a593Smuzhiyun * routine and freed by sci_remote_device_destruct().
1302*4882a593Smuzhiyun *
1303*4882a593Smuzhiyun * Returns:
1304*4882a593Smuzhiyun * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1305*4882a593Smuzhiyun * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1306*4882a593Smuzhiyun * sata-only controller instance.
1307*4882a593Smuzhiyun * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1308*4882a593Smuzhiyun */
1309*4882a593Smuzhiyun static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1310*4882a593Smuzhiyun struct isci_remote_device *idev)
1311*4882a593Smuzhiyun {
1312*4882a593Smuzhiyun struct domain_device *dev = idev->domain_dev;
1313*4882a593Smuzhiyun enum sci_status status;
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun sci_remote_device_construct(iport, idev);
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1318*4882a593Smuzhiyun idev,
1319*4882a593Smuzhiyun &idev->rnc.remote_node_index);
1320*4882a593Smuzhiyun if (status != SCI_SUCCESS)
1321*4882a593Smuzhiyun return status;
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun /* For SAS-2 the physical link rate is actually a logical link
1324*4882a593Smuzhiyun * rate that incorporates multiplexing. The SCU doesn't
1325*4882a593Smuzhiyun * incorporate multiplexing and for the purposes of the
1326*4882a593Smuzhiyun * connection the logical link rate is the same as the
1327*4882a593Smuzhiyun * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1328*4882a593Smuzhiyun * one another, so this code works for both situations.
1329*4882a593Smuzhiyun */
1330*4882a593Smuzhiyun idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1331*4882a593Smuzhiyun dev->linkrate);
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun /* TODO: should the port width be assigned by reading all of the phys on the port? */
1334*4882a593Smuzhiyun idev->device_port_width = 1;
1335*4882a593Smuzhiyun
1336*4882a593Smuzhiyun return SCI_SUCCESS;
1337*4882a593Smuzhiyun }
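
/*
 * Example of the connection-rate clamp above (illustrative values): if
 * the port allows up to 6.0 Gbps but libsas reports the expander attached
 * device at 3.0 Gbps, min_t() picks the slower of the two:
 *
 *	enum sas_linkrate rate;
 *
 *	rate = min_t(u16, SAS_LINK_RATE_6_0_GBPS, SAS_LINK_RATE_3_0_GBPS);
 *	// rate == SAS_LINK_RATE_3_0_GBPS
 */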
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun enum sci_status sci_remote_device_resume(
1340*4882a593Smuzhiyun struct isci_remote_device *idev,
1341*4882a593Smuzhiyun scics_sds_remote_node_context_callback cb_fn,
1342*4882a593Smuzhiyun void *cb_p)
1343*4882a593Smuzhiyun {
1344*4882a593Smuzhiyun enum sci_status status;
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1347*4882a593Smuzhiyun if (status != SCI_SUCCESS)
1348*4882a593Smuzhiyun dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1349*4882a593Smuzhiyun __func__, status);
1350*4882a593Smuzhiyun return status;
1351*4882a593Smuzhiyun }
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1354*4882a593Smuzhiyun {
1355*4882a593Smuzhiyun struct isci_remote_device *idev = cbparam;
1356*4882a593Smuzhiyun struct isci_host *ihost = idev->owning_port->owning_controller;
1357*4882a593Smuzhiyun scics_sds_remote_node_context_callback abort_resume_cb =
1358*4882a593Smuzhiyun idev->abort_resume_cb;
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1361*4882a593Smuzhiyun __func__, abort_resume_cb);
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun if (abort_resume_cb != NULL) {
1364*4882a593Smuzhiyun idev->abort_resume_cb = NULL;
1365*4882a593Smuzhiyun abort_resume_cb(idev->abort_resume_cbparam);
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1368*4882a593Smuzhiyun wake_up(&ihost->eventq);
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun static bool isci_remote_device_test_resume_done(
1372*4882a593Smuzhiyun struct isci_host *ihost,
1373*4882a593Smuzhiyun struct isci_remote_device *idev)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun unsigned long flags;
1376*4882a593Smuzhiyun bool done;
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun spin_lock_irqsave(&ihost->scic_lock, flags);
1379*4882a593Smuzhiyun done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
1380*4882a593Smuzhiyun || test_bit(IDEV_STOP_PENDING, &idev->flags)
1381*4882a593Smuzhiyun || sci_remote_node_context_is_being_destroyed(&idev->rnc);
1382*4882a593Smuzhiyun spin_unlock_irqrestore(&ihost->scic_lock, flags);
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun return done;
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun
1387*4882a593Smuzhiyun void isci_remote_device_wait_for_resume_from_abort(
1388*4882a593Smuzhiyun struct isci_host *ihost,
1389*4882a593Smuzhiyun struct isci_remote_device *idev)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1392*4882a593Smuzhiyun __func__, idev);
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun #define MAX_RESUME_MSECS 10000
1395*4882a593Smuzhiyun if (!wait_event_timeout(ihost->eventq,
1396*4882a593Smuzhiyun isci_remote_device_test_resume_done(ihost, idev),
1397*4882a593Smuzhiyun msecs_to_jiffies(MAX_RESUME_MSECS))) {
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1400*4882a593Smuzhiyun "resume: %p\n", __func__, idev);
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1405*4882a593Smuzhiyun __func__, idev);
1406*4882a593Smuzhiyun }
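
/*
 * The wait above pairs with isci_remote_device_resume_from_abort_complete():
 * the resume callback clears IDEV_ABORT_PATH_RESUME_PENDING and wakes
 * ihost->eventq, which satisfies isci_remote_device_test_resume_done().
 * The same wait/wake idiom in miniature (hypothetical flag and queue
 * names, for illustration only):
 *
 *	// waiter
 *	wait_event_timeout(my_wq, !test_bit(MY_PENDING, &my_flags),
 *			   msecs_to_jiffies(MAX_RESUME_MSECS));
 *
 *	// completer (runs from the resume callback)
 *	clear_bit(MY_PENDING, &my_flags);
 *	wake_up(&my_wq);
 */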
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun enum sci_status isci_remote_device_resume_from_abort(
1409*4882a593Smuzhiyun struct isci_host *ihost,
1410*4882a593Smuzhiyun struct isci_remote_device *idev)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun unsigned long flags;
1413*4882a593Smuzhiyun enum sci_status status = SCI_SUCCESS;
1414*4882a593Smuzhiyun int destroyed;
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun spin_lock_irqsave(&ihost->scic_lock, flags);
1417*4882a593Smuzhiyun /* Preserve any current resume callbacks, for instance from other
1418*4882a593Smuzhiyun * resumptions.
1419*4882a593Smuzhiyun */
1420*4882a593Smuzhiyun idev->abort_resume_cb = idev->rnc.user_callback;
1421*4882a593Smuzhiyun idev->abort_resume_cbparam = idev->rnc.user_cookie;
1422*4882a593Smuzhiyun set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1423*4882a593Smuzhiyun clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1424*4882a593Smuzhiyun destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1425*4882a593Smuzhiyun if (!destroyed)
1426*4882a593Smuzhiyun status = sci_remote_device_resume(
1427*4882a593Smuzhiyun idev, isci_remote_device_resume_from_abort_complete,
1428*4882a593Smuzhiyun idev);
1429*4882a593Smuzhiyun spin_unlock_irqrestore(&ihost->scic_lock, flags);
1430*4882a593Smuzhiyun if (!destroyed && (status == SCI_SUCCESS))
1431*4882a593Smuzhiyun isci_remote_device_wait_for_resume_from_abort(ihost, idev);
1432*4882a593Smuzhiyun else
1433*4882a593Smuzhiyun clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1434*4882a593Smuzhiyun
1435*4882a593Smuzhiyun return status;
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun /**
1439*4882a593Smuzhiyun * sci_remote_device_start() - This method will start the supplied remote
1440*4882a593Smuzhiyun * device. This method enables normal IO requests to flow through to the
1441*4882a593Smuzhiyun * remote device.
1442*4882a593Smuzhiyun * @idev: This parameter specifies the device to be started.
1443*4882a593Smuzhiyun * @timeout: This parameter specifies the number of milliseconds in which the
1444*4882a593Smuzhiyun * start operation should complete.
1445*4882a593Smuzhiyun *
1446*4882a593Smuzhiyun * An indication of whether the device was successfully started. SCI_SUCCESS
1447*4882a593Smuzhiyun * This value is returned if the device was successfully started.
1448*4882a593Smuzhiyun * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
1449*4882a593Smuzhiyun * the device when there have been no phys added to it.
1450*4882a593Smuzhiyun */
1451*4882a593Smuzhiyun static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1452*4882a593Smuzhiyun u32 timeout)
1453*4882a593Smuzhiyun {
1454*4882a593Smuzhiyun struct sci_base_state_machine *sm = &idev->sm;
1455*4882a593Smuzhiyun enum sci_remote_device_states state = sm->current_state_id;
1456*4882a593Smuzhiyun enum sci_status status;
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun if (state != SCI_DEV_STOPPED) {
1459*4882a593Smuzhiyun dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1460*4882a593Smuzhiyun __func__, dev_state_name(state));
1461*4882a593Smuzhiyun return SCI_FAILURE_INVALID_STATE;
1462*4882a593Smuzhiyun }
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun status = sci_remote_device_resume(idev, remote_device_resume_done,
1465*4882a593Smuzhiyun idev);
1466*4882a593Smuzhiyun if (status != SCI_SUCCESS)
1467*4882a593Smuzhiyun return status;
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun sci_change_state(sm, SCI_DEV_STARTING);
1470*4882a593Smuzhiyun
1471*4882a593Smuzhiyun return SCI_SUCCESS;
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1475*4882a593Smuzhiyun struct isci_remote_device *idev)
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun struct isci_host *ihost = iport->isci_host;
1478*4882a593Smuzhiyun struct domain_device *dev = idev->domain_dev;
1479*4882a593Smuzhiyun enum sci_status status;
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun if (dev->parent && dev_is_expander(dev->parent->dev_type))
1482*4882a593Smuzhiyun status = sci_remote_device_ea_construct(iport, idev);
1483*4882a593Smuzhiyun else
1484*4882a593Smuzhiyun status = sci_remote_device_da_construct(iport, idev);
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun if (status != SCI_SUCCESS) {
1487*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1488*4882a593Smuzhiyun __func__, status);
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun return status;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun /* start the device. */
1494*4882a593Smuzhiyun status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1495*4882a593Smuzhiyun
1496*4882a593Smuzhiyun if (status != SCI_SUCCESS)
1497*4882a593Smuzhiyun dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1498*4882a593Smuzhiyun status);
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun return status;
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun /**
1504*4882a593Smuzhiyun * isci_remote_device_alloc() - This function allocates the isci_remote_device
1505*4882a593Smuzhiyun * used when a libsas dev_found message is received.
1506*4882a593Smuzhiyun * @ihost: This parameter specifies the isci host object.
1507*4882a593Smuzhiyun * @iport: This parameter specifies the isci_port connected to this device.
1508*4882a593Smuzhiyun *
1509*4882a593Smuzhiyun * Return: pointer to a new isci_remote_device, or NULL if none is available.
1510*4882a593Smuzhiyun */
1511*4882a593Smuzhiyun static struct isci_remote_device *
1512*4882a593Smuzhiyun isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun struct isci_remote_device *idev;
1515*4882a593Smuzhiyun int i;
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1518*4882a593Smuzhiyun idev = &ihost->devices[i];
1519*4882a593Smuzhiyun if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1520*4882a593Smuzhiyun break;
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun if (i >= SCI_MAX_REMOTE_DEVICES) {
1524*4882a593Smuzhiyun dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1525*4882a593Smuzhiyun return NULL;
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1528*4882a593Smuzhiyun return NULL;
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun return idev;
1531*4882a593Smuzhiyun }
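
/*
 * Allocation above is a lock-free scan of the fixed device pool: the first
 * slot whose IDEV_ALLOCATED bit was previously clear is claimed atomically
 * by test_and_set_bit(); isci_remote_device_release() below is the matching
 * free path.  The idiom in isolation (hypothetical pool and flag names,
 * illustration only):
 *
 *	for (i = 0; i < POOL_SIZE; i++)
 *		if (!test_and_set_bit(SLOT_BUSY, &pool[i].flags))
 *			return &pool[i];	// claimed slot i
 *	return NULL;				// pool exhausted
 */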
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun void isci_remote_device_release(struct kref *kref)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1536*4882a593Smuzhiyun struct isci_host *ihost = idev->isci_port->isci_host;
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun idev->domain_dev = NULL;
1539*4882a593Smuzhiyun idev->isci_port = NULL;
1540*4882a593Smuzhiyun clear_bit(IDEV_START_PENDING, &idev->flags);
1541*4882a593Smuzhiyun clear_bit(IDEV_STOP_PENDING, &idev->flags);
1542*4882a593Smuzhiyun clear_bit(IDEV_IO_READY, &idev->flags);
1543*4882a593Smuzhiyun clear_bit(IDEV_GONE, &idev->flags);
1544*4882a593Smuzhiyun smp_mb__before_atomic();
1545*4882a593Smuzhiyun clear_bit(IDEV_ALLOCATED, &idev->flags);
1546*4882a593Smuzhiyun wake_up(&ihost->eventq);
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun /**
1550*4882a593Smuzhiyun * isci_remote_device_stop() - This function is called internally to stop the
1551*4882a593Smuzhiyun * remote device.
1552*4882a593Smuzhiyun * @ihost: This parameter specifies the isci host object.
1553*4882a593Smuzhiyun * @idev: This parameter specifies the remote device.
1554*4882a593Smuzhiyun *
1555*4882a593Smuzhiyun * The status of the ihost request to stop.
1556*4882a593Smuzhiyun */
1557*4882a593Smuzhiyun enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1558*4882a593Smuzhiyun {
1559*4882a593Smuzhiyun enum sci_status status;
1560*4882a593Smuzhiyun unsigned long flags;
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1563*4882a593Smuzhiyun "%s: isci_device = %p\n", __func__, idev);
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun spin_lock_irqsave(&ihost->scic_lock, flags);
1566*4882a593Smuzhiyun idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1567*4882a593Smuzhiyun set_bit(IDEV_GONE, &idev->flags);
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun set_bit(IDEV_STOP_PENDING, &idev->flags);
1570*4882a593Smuzhiyun status = sci_remote_device_stop(idev, 50);
1571*4882a593Smuzhiyun spin_unlock_irqrestore(&ihost->scic_lock, flags);
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun /* Wait for the stop complete callback. */
1574*4882a593Smuzhiyun if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1575*4882a593Smuzhiyun /* nothing to wait for */;
1576*4882a593Smuzhiyun else
1577*4882a593Smuzhiyun wait_for_device_stop(ihost, idev);
1578*4882a593Smuzhiyun
1579*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1580*4882a593Smuzhiyun "%s: isci_device = %p, waiting done.\n", __func__, idev);
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun return status;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun
1585*4882a593Smuzhiyun /**
1586*4882a593Smuzhiyun * isci_remote_device_gone() - This function is called by libsas when a domain
1587*4882a593Smuzhiyun * device is removed.
1588*4882a593Smuzhiyun * @dev: This parameter specifies the libsas domain device.
1589*4882a593Smuzhiyun *
1590*4882a593Smuzhiyun */
1591*4882a593Smuzhiyun void isci_remote_device_gone(struct domain_device *dev)
1592*4882a593Smuzhiyun {
1593*4882a593Smuzhiyun struct isci_host *ihost = dev_to_ihost(dev);
1594*4882a593Smuzhiyun struct isci_remote_device *idev = dev->lldd_dev;
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1597*4882a593Smuzhiyun "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1598*4882a593Smuzhiyun __func__, dev, idev, idev->isci_port);
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun isci_remote_device_stop(ihost, idev);
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun /**
1605*4882a593Smuzhiyun * isci_remote_device_found() - This function is called by libsas when a remote
1606*4882a593Smuzhiyun * device is discovered. A remote device object is created and started. The
1607*4882a593Smuzhiyun * function then sleeps until the sci core device started message is
1608*4882a593Smuzhiyun * received.
1609*4882a593Smuzhiyun * @dev: This parameter specifies the libsas domain device.
1610*4882a593Smuzhiyun *
1611*4882a593Smuzhiyun * Return: zero indicates success; a negative errno value indicates failure.
1612*4882a593Smuzhiyun */
1613*4882a593Smuzhiyun int isci_remote_device_found(struct domain_device *dev)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun struct isci_host *isci_host = dev_to_ihost(dev);
1616*4882a593Smuzhiyun struct isci_port *isci_port = dev->port->lldd_port;
1617*4882a593Smuzhiyun struct isci_remote_device *isci_device;
1618*4882a593Smuzhiyun enum sci_status status;
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun dev_dbg(&isci_host->pdev->dev,
1621*4882a593Smuzhiyun "%s: domain_device = %p\n", __func__, dev);
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun if (!isci_port)
1624*4882a593Smuzhiyun return -ENODEV;
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun isci_device = isci_remote_device_alloc(isci_host, isci_port);
1627*4882a593Smuzhiyun if (!isci_device)
1628*4882a593Smuzhiyun return -ENODEV;
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun kref_init(&isci_device->kref);
1631*4882a593Smuzhiyun INIT_LIST_HEAD(&isci_device->node);
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun spin_lock_irq(&isci_host->scic_lock);
1634*4882a593Smuzhiyun isci_device->domain_dev = dev;
1635*4882a593Smuzhiyun isci_device->isci_port = isci_port;
1636*4882a593Smuzhiyun list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun set_bit(IDEV_START_PENDING, &isci_device->flags);
1639*4882a593Smuzhiyun status = isci_remote_device_construct(isci_port, isci_device);
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun dev_dbg(&isci_host->pdev->dev,
1642*4882a593Smuzhiyun "%s: isci_device = %p\n",
1643*4882a593Smuzhiyun __func__, isci_device);
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun if (status == SCI_SUCCESS) {
1646*4882a593Smuzhiyun /* device came up, advertise it to the world */
1647*4882a593Smuzhiyun dev->lldd_dev = isci_device;
1648*4882a593Smuzhiyun } else
1649*4882a593Smuzhiyun isci_put_device(isci_device);
1650*4882a593Smuzhiyun spin_unlock_irq(&isci_host->scic_lock);
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun /* wait for the device ready callback. */
1653*4882a593Smuzhiyun wait_for_device_start(isci_host, isci_device);
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun return status == SCI_SUCCESS ? 0 : -ENODEV;
1656*4882a593Smuzhiyun }
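
/*
 * isci_remote_device_found() and isci_remote_device_gone() are the hooks
 * libsas calls on domain discovery and removal.  They are expected to be
 * registered through the driver's sas_domain_function_template, roughly
 * as sketched below (registration actually lives in the driver init code;
 * the structure name here is illustrative):
 *
 *	static struct sas_domain_function_template isci_transport_ops = {
 *		.lldd_dev_found	= isci_remote_device_found,
 *		.lldd_dev_gone	= isci_remote_device_gone,
 *		// ... other lldd_* handlers ...
 *	};
 */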
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun enum sci_status isci_remote_device_suspend_terminate(
1659*4882a593Smuzhiyun struct isci_host *ihost,
1660*4882a593Smuzhiyun struct isci_remote_device *idev,
1661*4882a593Smuzhiyun struct isci_request *ireq)
1662*4882a593Smuzhiyun {
1663*4882a593Smuzhiyun unsigned long flags;
1664*4882a593Smuzhiyun enum sci_status status;
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun /* Put the device into suspension. */
1667*4882a593Smuzhiyun spin_lock_irqsave(&ihost->scic_lock, flags);
1668*4882a593Smuzhiyun set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1669*4882a593Smuzhiyun sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1670*4882a593Smuzhiyun spin_unlock_irqrestore(&ihost->scic_lock, flags);
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun /* Terminate and wait for the completions. */
1673*4882a593Smuzhiyun status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1674*4882a593Smuzhiyun if (status != SCI_SUCCESS)
1675*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
1676*4882a593Smuzhiyun "%s: isci_remote_device_terminate_requests(%p) "
1677*4882a593Smuzhiyun "returned %d!\n",
1678*4882a593Smuzhiyun __func__, idev, status);
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun /* NOTE: RNC resumption is left to the caller! */
1681*4882a593Smuzhiyun return status;
1682*4882a593Smuzhiyun }
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun int isci_remote_device_is_safe_to_abort(
1685*4882a593Smuzhiyun struct isci_remote_device *idev)
1686*4882a593Smuzhiyun {
1687*4882a593Smuzhiyun return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun enum sci_status sci_remote_device_abort_requests_pending_abort(
1691*4882a593Smuzhiyun struct isci_remote_device *idev)
1692*4882a593Smuzhiyun {
1693*4882a593Smuzhiyun return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1694*4882a593Smuzhiyun }
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun enum sci_status isci_remote_device_reset_complete(
1697*4882a593Smuzhiyun struct isci_host *ihost,
1698*4882a593Smuzhiyun struct isci_remote_device *idev)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun unsigned long flags;
1701*4882a593Smuzhiyun enum sci_status status;
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun spin_lock_irqsave(&ihost->scic_lock, flags);
1704*4882a593Smuzhiyun status = sci_remote_device_reset_complete(idev);
1705*4882a593Smuzhiyun spin_unlock_irqrestore(&ihost->scic_lock, flags);
1706*4882a593Smuzhiyun
1707*4882a593Smuzhiyun return status;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun
1710*4882a593Smuzhiyun void isci_dev_set_hang_detection_timeout(
1711*4882a593Smuzhiyun struct isci_remote_device *idev,
1712*4882a593Smuzhiyun u32 timeout)
1713*4882a593Smuzhiyun {
1714*4882a593Smuzhiyun if (dev_is_sata(idev->domain_dev)) {
1715*4882a593Smuzhiyun if (timeout) {
1716*4882a593Smuzhiyun if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
1717*4882a593Smuzhiyun &idev->flags))
1718*4882a593Smuzhiyun return; /* Already enabled. */
1719*4882a593Smuzhiyun } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
1720*4882a593Smuzhiyun &idev->flags))
1721*4882a593Smuzhiyun return; /* Not enabled. */
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun sci_port_set_hang_detection_timeout(idev->owning_port,
1724*4882a593Smuzhiyun timeout);
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun }
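
/*
 * The flag handling above makes enable/disable idempotent: a non-zero
 * timeout programs the port only on the first call (test_and_set_bit()
 * returns the old bit value), and a zero timeout only clears hang
 * detection if it was actually enabled.  The same pattern generically
 * (hypothetical names, illustration only):
 *
 *	if (enable) {
 *		if (test_and_set_bit(FEATURE_ON, &flags))
 *			return;			// already enabled
 *	} else if (!test_and_clear_bit(FEATURE_ON, &flags))
 *		return;				// already disabled
 *	apply_hw_setting(enable);		// hypothetical helper
 */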