// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */

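/*
 * Arm the ascb's timeout timer and post the ascb to the hardware.  If
 * the post fails, the timer is deleted again so that @timed_out can
 * never fire for an ascb that was never queued.
 */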
static int asd_enqueue_internal(struct asd_ascb *ascb,
                void (*tasklet_complete)(struct asd_ascb *,
                                         struct done_list_struct *),
                void (*timed_out)(struct timer_list *t))
{
        int res;

        ascb->tasklet_complete = tasklet_complete;
        ascb->uldd_timer = 1;

        ascb->timer.function = timed_out;
        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

        add_timer(&ascb->timer);

        res = asd_post_ascb_list(ascb->ha, ascb, 1);
        if (unlikely(res))
                del_timer(&ascb->timer);
        return res;
}

/* ---------- CLEAR NEXUS ---------- */

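/*
 * Per-request completion status, shared between the submitting thread
 * (via ascb->uldd_task) and the interrupt-time completion handlers.
 */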
struct tasklet_completion_status {
        int     dl_opcode;
        int     tmf_state;
        u8      tag_valid:1;
        __be16  tag;
};

#define DECLARE_TCS(tcs) \
        struct tasklet_completion_status tcs = { \
                .dl_opcode = 0, \
                .tmf_state = 0, \
                .tag_valid = 0, \
                .tag = 0, \
        }


static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
                                             struct done_list_struct *dl)
{
        struct tasklet_completion_status *tcs = ascb->uldd_task;

        ASD_DPRINTK("%s: here\n", __func__);
        if (!del_timer(&ascb->timer)) {
                ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
                return;
        }
        ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
        tcs->dl_opcode = dl->opcode;
        complete(ascb->completion);
        asd_ascb_free(ascb);
}

static void asd_clear_nexus_timedout(struct timer_list *t)
{
        struct asd_ascb *ascb = from_timer(ascb, t, timer);
        struct tasklet_completion_status *tcs = ascb->uldd_task;

        ASD_DPRINTK("%s: here\n", __func__);
        tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
        complete(ascb->completion);
}

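/*
 * CLEAR_NEXUS_PRE/POST bracket the body of each asd_clear_nexus_*()
 * function below: PRE allocates an ascb with an on-stack completion
 * and sets the CLEAR NEXUS opcode; the function body then fills in the
 * nexus-specific fields; POST posts the SCB, waits for completion, and
 * maps TC_NO_ERROR to TMF_RESP_FUNC_COMPLETE.
 */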
#define CLEAR_NEXUS_PRE \
        struct asd_ascb *ascb; \
        struct scb *scb; \
        int res; \
        DECLARE_COMPLETION_ONSTACK(completion); \
        DECLARE_TCS(tcs); \
        \
        ASD_DPRINTK("%s: PRE\n", __func__); \
        res = 1; \
        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
        if (!ascb) \
                return -ENOMEM; \
        \
        ascb->completion = &completion; \
        ascb->uldd_task = &tcs; \
        scb = ascb->scb; \
        scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST \
        ASD_DPRINTK("%s: POST\n", __func__); \
        res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
                                   asd_clear_nexus_timedout); \
        if (res) \
                goto out_err; \
        ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
        wait_for_completion(&completion); \
        res = tcs.dl_opcode; \
        if (res == TC_NO_ERROR) \
                res = TMF_RESP_FUNC_COMPLETE; \
        return res; \
out_err: \
        asd_ascb_free(ascb); \
        return res

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
        struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_ADAPTER;
        CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
        struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_PORT;
        scb->clear_nexus.conn_mask = port->phy_mask;
        CLEAR_NEXUS_POST;
}

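/*
 * A CLEAR NEXUS (I_T) is issued in up to three phases around a link or
 * hard reset: PRE suspends transmission and clears the execution
 * queue, POST flushes the send queue once the reset has taken effect,
 * and RESUME re-enables transmission.
 */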
enum clear_nexus_phase {
        NEXUS_PHASE_PRE,
        NEXUS_PHASE_POST,
        NEXUS_PHASE_RESUME,
};

static int asd_clear_nexus_I_T(struct domain_device *dev,
                               enum clear_nexus_phase phase)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_I_T;
        switch (phase) {
        case NEXUS_PHASE_PRE:
                scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
                break;
        case NEXUS_PHASE_POST:
                scb->clear_nexus.flags = SEND_Q | NOTINQ;
                break;
        case NEXUS_PHASE_RESUME:
                scb->clear_nexus.flags = RESUME_TX;
        }
        scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                   dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

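/*
 * Reset the I_T nexus: suspend traffic, reset the phy (link reset for
 * SATA/STP, hard reset for SSP), flush outstanding commands, then try
 * up to three times to resume the suspended sequencer.
 */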
int asd_I_T_nexus_reset(struct domain_device *dev)
{
        int res, tmp_res, i;
        struct sas_phy *phy = sas_get_local_phy(dev);
        /* Standard mandates link reset for ATA (type 0) and
         * hard reset for SSP (type 1) */
        int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                          (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

        asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
        /* send a hard reset */
        ASD_DPRINTK("sending %s reset to %s\n",
                    reset_type ? "hard" : "soft", dev_name(&phy->dev));
        res = sas_phy_reset(phy, reset_type);
        if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
                /* wait for the maximum settle time */
                msleep(500);
                /* clear all outstanding commands (keep nexus suspended) */
                asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
        }
        for (i = 0; i < 3; i++) {
                tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
                if (tmp_res == TC_RESUME)
                        goto out;
                msleep(500);
        }

        /* This is a bit of a problem: the sequencer is still suspended
         * and is refusing to resume.  Hope it will resume on a bigger hammer
         * or the disk is lost */
        dev_printk(KERN_ERR, &phy->dev,
                   "Failed to resume nexus after reset 0x%x\n", tmp_res);

        res = TMF_RESP_FUNC_FAILED;
out:
        sas_put_local_phy(phy);
        return res;
}

static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_I_T_L;
        scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
        memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
        scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                   dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

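/*
 * Clear a single pending task by its transport tag (I_T_L_Q nexus).
 * Used when the tag of the task to be cleared is known to be valid.
 */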
static int asd_clear_nexus_tag(struct sas_task *task)
{
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        struct asd_ascb *tascb = task->lldd_task;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_TAG;
        memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
        scb->clear_nexus.ssp_task.tag = tascb->tag;
        if (task->dev->tproto)
                scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                           task->dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

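/*
 * Clear a single pending task by its transaction context index.  This
 * is the fallback when no valid transport tag is available.
 */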
static int asd_clear_nexus_index(struct sas_task *task)
{
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        struct asd_ascb *tascb = task->lldd_task;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_TRANS_CX;
        if (task->dev->tproto)
                scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                           task->dev->lldd_dev);
        scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
        CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */

static void asd_tmf_timedout(struct timer_list *t)
{
        struct asd_ascb *ascb = from_timer(ascb, t, timer);
        struct tasklet_completion_status *tcs = ascb->uldd_task;

        ASD_DPRINTK("tmf timed out\n");
        tcs->tmf_state = TMF_RESP_FUNC_FAILED;
        complete(ascb->completion);
}

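/*
 * Dig the SSP RESPONSE IU for a TMF out of the empty data buffer (EDB)
 * referenced by the done-list status block, and translate it into a
 * TMF response code.
 */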
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
                                    struct done_list_struct *dl)
{
        struct asd_ha_struct *asd_ha = ascb->ha;
        unsigned long flags;
        struct tc_resp_sb_struct {
                __le16 index_escb;
                u8     len_lsb;
                u8     flags;
        } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

        int edb_id = ((resp_sb->flags & 0x70) >> 4) - 1;
        struct asd_ascb *escb;
        struct asd_dma_tok *edb;
        struct ssp_frame_hdr *fh;
        struct ssp_response_iu *ru;
        int res = TMF_RESP_FUNC_FAILED;

        ASD_DPRINTK("tmf resp tasklet\n");

        spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
        escb = asd_tc_index_find(&asd_ha->seq,
                                 (int)le16_to_cpu(resp_sb->index_escb));
        spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

        if (!escb) {
                ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
                return res;
        }

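        /* Within the EDB: the frame's tag lives at offset 4, the SSP
         * frame header at offset 16, and the response IU follows the
         * frame header. */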
        edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
        ascb->tag = *(__be16 *)(edb->vaddr+4);
        fh = edb->vaddr + 16;
        ru = edb->vaddr + 16 + sizeof(*fh);
        res = ru->status;
        if (ru->datapres == 1)  /* Response data present */
                res = ru->resp_data[3];
#if 0
        ascb->tag = fh->tag;
#endif
        ascb->tag_valid = 1;

        asd_invalidate_edb(escb, edb_id);
        return res;
}

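/*
 * Completion handler for TMF SCBs.  Records the done-list opcode and,
 * for an SSP response, the decoded TMF state and tag, then wakes the
 * submitter.  Bails out if the timeout handler has already fired.
 */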
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
                                     struct done_list_struct *dl)
{
        struct tasklet_completion_status *tcs;

        if (!del_timer(&ascb->timer))
                return;

        tcs = ascb->uldd_task;
        ASD_DPRINTK("tmf tasklet complete\n");

        tcs->dl_opcode = dl->opcode;

        if (dl->opcode == TC_SSP_RESP) {
                tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
                tcs->tag_valid = ascb->tag_valid;
                tcs->tag = ascb->tag;
        }

        complete(ascb->completion);
        asd_ascb_free(ascb);
}

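/*
 * Clear the nexus for an individual task, preferring the tag-based
 * variant when a valid tag was returned, and re-check the task state
 * once the clear completes.
 */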
static int asd_clear_nexus(struct sas_task *task)
{
        int res = TMF_RESP_FUNC_FAILED;
        int leftover;
        struct asd_ascb *tascb = task->lldd_task;
        DECLARE_COMPLETION_ONSTACK(completion);
        unsigned long flags;

        tascb->completion = &completion;

        ASD_DPRINTK("task not done, clearing nexus\n");
        if (tascb->tag_valid)
                res = asd_clear_nexus_tag(task);
        else
                res = asd_clear_nexus_index(task);
        leftover = wait_for_completion_timeout(&completion,
                                               AIC94XX_SCB_TIMEOUT);
        tascb->completion = NULL;
        ASD_DPRINTK("came back from clear nexus\n");
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (leftover < 1)
                res = TMF_RESP_FUNC_FAILED;
        if (task->task_state_flags & SAS_TASK_STATE_DONE)
                res = TMF_RESP_FUNC_COMPLETE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted.  The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework.  The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *	if the return code is TMF_RESP_FUNC_COMPLETE, then
 *		the task was aborted successfully.  The caller of
 *		ABORT TASK has responsibility to call task->task_done()
 *		to finish the task, xor free the task depending on their
 *		framework.
 *	else
 *		the ABORT TASK returned some kind of error.  The task
 *		was _not_ cancelled.  Nothing can be assumed.
 *		The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
        struct asd_ascb *tascb = task->lldd_task;
        struct asd_ha_struct *asd_ha = tascb->ha;
        int res = 1;
        unsigned long flags;
        struct asd_ascb *ascb = NULL;
        struct scb *scb;
        int leftover;
        DECLARE_TCS(tcs);
        DECLARE_COMPLETION_ONSTACK(completion);
        DECLARE_COMPLETION_ONSTACK(tascb_completion);

        tascb->completion = &tascb_completion;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                res = TMF_RESP_FUNC_COMPLETE;
                ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
                goto out_done;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
        if (!ascb)
                return -ENOMEM;

        ascb->uldd_task = &tcs;
        ascb->completion = &completion;
        scb = ascb->scb;
        scb->header.opcode = SCB_ABORT_TASK;

        switch (task->task_proto) {
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
                scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
                break;
        case SAS_PROTOCOL_SSP:
                scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
                scb->abort_task.proto_conn_rate |= task->dev->linkrate;
                break;
        case SAS_PROTOCOL_SMP:
                break;
        default:
                break;
        }

        if (task->task_proto == SAS_PROTOCOL_SSP) {
                scb->abort_task.ssp_frame.frame_type = SSP_TASK;
                memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
                       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
                memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
                       task->dev->port->ha->hashed_sas_addr,
                       HASHED_SAS_ADDR_SIZE);
                scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

                memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
                scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
                scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
        }

        scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
        scb->abort_task.conn_handle = cpu_to_le16(
                (u16)(unsigned long)task->dev->lldd_dev);
        scb->abort_task.retry_count = 1;
        scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
        scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

        res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
                                   asd_tmf_timedout);
        if (res)
                goto out_free;
        wait_for_completion(&completion);
        ASD_DPRINTK("tmf came back\n");

        tascb->tag = tcs.tag;
        tascb->tag_valid = tcs.tag_valid;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                res = TMF_RESP_FUNC_COMPLETE;
                ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
                goto out_done;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        if (tcs.dl_opcode == TC_SSP_RESP) {
                /* The task to be aborted has been sent to the device.
                 * We got a Response IU for the ABORT TASK TMF. */
                if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
                        res = asd_clear_nexus(task);
                else
                        res = tcs.tmf_state;
        } else if (tcs.dl_opcode == TC_NO_ERROR &&
                   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
                /* timeout */
                res = TMF_RESP_FUNC_FAILED;
        } else {
                /* In the following we assume that the managing layer
                 * will _never_ make a mistake, when issuing ABORT
                 * TASK.
                 */
                switch (tcs.dl_opcode) {
                default:
                        res = asd_clear_nexus(task);
                        fallthrough;
                case TC_NO_ERROR:
                        break;
                /* The task hasn't been sent to the device xor
                 * we never got a (sane) Response IU for the
                 * ABORT TASK TMF.
                 */
                case TF_NAK_RECV:
                        res = TMF_RESP_INVALID_FRAME;
                        break;
                case TF_TMF_TASK_DONE:  /* done but not reported yet */
                        res = TMF_RESP_FUNC_FAILED;
                        leftover =
                                wait_for_completion_timeout(&tascb_completion,
                                                            AIC94XX_SCB_TIMEOUT);
                        spin_lock_irqsave(&task->task_state_lock, flags);
                        if (leftover < 1)
                                res = TMF_RESP_FUNC_FAILED;
                        if (task->task_state_flags & SAS_TASK_STATE_DONE)
                                res = TMF_RESP_FUNC_COMPLETE;
                        spin_unlock_irqrestore(&task->task_state_lock, flags);
                        break;
                case TF_TMF_NO_TAG:
                case TF_TMF_TAG_FREE:   /* the tag is in the free list */
                case TF_TMF_NO_CONN_HANDLE:     /* no such device */
                        res = TMF_RESP_FUNC_COMPLETE;
                        break;
                case TF_TMF_NO_CTX:     /* not in seq, or proto != SSP */
                        res = TMF_RESP_FUNC_ESUPP;
                        break;
                }
        }
out_done:
        tascb->completion = NULL;
        if (res == TMF_RESP_FUNC_COMPLETE) {
                task->lldd_task = NULL;
                mb();
                asd_ascb_free(tascb);
        }
        ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
        return res;

out_free:
        asd_ascb_free(ascb);
        ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
        return res;
}
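
/*
 * Illustrative caller pattern for asd_abort_task() (a sketch only; the
 * surrounding error-recovery context, and how the finished task is
 * completed or freed, are assumptions, not part of this driver):
 *
 *	spin_lock_irqsave(&task->task_state_lock, flags);
 *	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 *		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 *	spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *	res = asd_abort_task(task);
 *	if (task->task_state_flags & SAS_TASK_STATE_DONE)
 *		... the task completed before the abort; finish it ...
 *	else if (res == TMF_RESP_FUNC_COMPLETE)
 *		... the task was aborted; complete or free it ...
 *	else
 *		... nothing can be assumed; the caller may retry ...
 */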

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
                                int tmf, int index)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        struct asd_ascb *ascb;
        int res = 1;
        struct scb *scb;
        DECLARE_COMPLETION_ONSTACK(completion);
        DECLARE_TCS(tcs);

        if (!(dev->tproto & SAS_PROTOCOL_SSP))
                return TMF_RESP_FUNC_ESUPP;

        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
        if (!ascb)
                return -ENOMEM;

        ascb->completion = &completion;
        ascb->uldd_task = &tcs;
        scb = ascb->scb;

        if (tmf == TMF_QUERY_TASK)
                scb->header.opcode = QUERY_SSP_TASK;
        else
                scb->header.opcode = INITIATE_SSP_TMF;

        scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
        scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
        /* SSP frame header */
        scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
        memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
               dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
               dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
        /* SSP Task IU */
        memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
        scb->ssp_tmf.ssp_task.tmf = tmf;

        scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
        scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
                                               dev->lldd_dev);
        scb->ssp_tmf.retry_count = 1;
        scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
        if (tmf == TMF_QUERY_TASK)
                scb->ssp_tmf.index = cpu_to_le16(index);

        res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
                                   asd_tmf_timedout);
        if (res)
                goto out_err;
        wait_for_completion(&completion);

        switch (tcs.dl_opcode) {
        case TC_NO_ERROR:
                res = TMF_RESP_FUNC_COMPLETE;
                break;
        case TF_NAK_RECV:
                res = TMF_RESP_INVALID_FRAME;
                break;
        case TF_TMF_TASK_DONE:
                res = TMF_RESP_FUNC_FAILED;
                break;
        case TF_TMF_NO_TAG:
        case TF_TMF_TAG_FREE:   /* the tag is in the free list */
        case TF_TMF_NO_CONN_HANDLE:     /* no such device */
                res = TMF_RESP_FUNC_COMPLETE;
                break;
        case TF_TMF_NO_CTX:     /* not in seq, or proto != SSP */
                res = TMF_RESP_FUNC_ESUPP;
                break;
        default:
                /* Allow TMF response codes to propagate upwards */
                res = tcs.dl_opcode;
                break;
        }
        return res;
out_err:
        asd_ascb_free(ascb);
        return res;
}

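/*
 * The wrappers below show the intended use of asd_initiate_ssp_tmf():
 * each sends one TMF and, on success, clears the I_T_L nexus so that
 * no SCBs for the affected logical unit remain queued.
 */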
int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
        struct asd_ascb *ascb = task->lldd_task;
        int index;

        if (ascb) {
                index = ascb->tc_index;
                return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
                                            TMF_QUERY_TASK, index);
        }
        return TMF_RESP_FUNC_COMPLETE;
}