/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

#undef C
#define C(a) (#a)
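/*
 * REQUEST_STATES is expected to be an x-macro style list (see
 * request.h) that expands each state as C(STATE_NAME); with C() defined
 * to stringify its argument, the expansion below becomes a table of
 * state names indexed by state.
 */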
const char *req_state_name(enum sci_base_request_states state)
{
	static const char * const strings[] = REQUEST_STATES;

	return strings[state];
}
#undef C

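/*
 * The first two SGL element pairs are stored directly in the task
 * context (sgl_pair_ab and sgl_pair_cd); any further pairs live in the
 * per-request sg_table, hence the "idx - 2" below.
 */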
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}

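/*
 * Return the bus address of SGL pair @idx.  For the two pairs embedded
 * in the task context this is an offset into the DMA-mapped task
 * context table; later pairs are translated from the request's own
 * buffer via sci_io_request_get_dma_addr().
 */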
static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			(void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			(void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}

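/*
 * Walk the scatterlist two elements at a time, filling one SCU SGL
 * element pair per iteration and chaining each pair to the next through
 * its next_pair_{upper,lower} bus address.  A task without a
 * scatterlist (num_scatter == 0) gets its single buffer mapped directly
 * and described by the first pair's A element alone.
 */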
static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

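	/*
	 * Copy the CDB into the IU, byte-swapping each u32 (the SCU
	 * transports these IU buffers word-swapped); the "+ 3" rounds a
	 * CDB length that is not a multiple of four up to whole words.
	 */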
	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
		       (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(test_bit(IREQ_TMF, &ireq->flags)) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * scu_ssp_request_construct_task_context() - fill in the SCU Task
 *    Context for any type of SSP request.
 * @ireq: the request whose task context is being constructed
 * @task_context: the buffer for the SCU task context being constructed
 */
static void scu_ssp_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

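/*
 * Encode the logical block size for the task context's block-guard
 * fields: 0 selects 512-byte, 1 selects 1024-byte and 3 selects
 * 4096-byte sectors; 0xff flags an unsupported size.
 */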
static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
	switch (sdp->sector_size) {
	case 512:
		return 0;
	case 1024:
		return 1;
	case 4096:
		return 3;
	default:
		return 0xff;
	}
}

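/*
 * Each protected logical block carries an 8-byte DIF tuple (guard tag,
 * application tag, reference tag), so the protection overhead for a
 * transfer is (len / sector_size) * 8 bytes.
 */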
static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
	return (len >> ilog2(sector_size)) * 8;
}

static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF write insert */
	tc->blk_prot_func = 0x2;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;
	tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/** setup block guard control **/
	tc->bgctl = 0;

	/* DIF write insert */
	tc->bgctl_f.op = 0x2;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_gen = 0;
}

static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF read strip */
	tc->blk_prot_func = 0x1;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/** setup block guard control **/
	tc->bgctl = 0;

	/* DIF read strip */
	tc->bgctl_f.crc_verify = 1;
	tc->bgctl_f.op = 0x1;
	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
		tc->bgctl_f.ref_tag_chk = 1;
		tc->bgctl_f.app_f_detect = 1;
	} else if (type & SCSI_PROT_DIF_TYPE3)
		tc->bgctl_f.app_ref_f_detect = 1;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;
	tc->ref_tag_seed_gen = 0;
}

/**
 * scu_ssp_io_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP IO request.
 * @ireq: the request whose task context is being constructed
 * @dir: the direction of data transfer for this request
 * @len: the total number of bytes to be transferred
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;
	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
	struct scsi_cmnd *scmd = sas_task->uldd_task;
	u8 prot_type = scsi_get_prot_type(scmd);
	u8 prot_op = scsi_get_prot_op(scmd);

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);

	if (prot_type != SCSI_PROT_DIF_TYPE0) {
		if (prot_op == SCSI_PROT_READ_STRIP)
			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
		else if (prot_op == SCSI_PROT_WRITE_INSERT)
			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
	}
}

/**
 * scu_ssp_task_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP Task request.
 * @ireq: the task request object being constructed
 *
 * The following important settings are utilized:
 *
 *   - priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *     request is issued ahead of other tasks destined for the same
 *     Remote Node.
 *   - task_type == SCU_TASK_TYPE_RAW_FRAME.  A raw frame is utilized to
 *     transmit the task management IU.
 *   - control_frame == 1.  This ensures that the proper endianness is
 *     set so that the bytes are transmitted in the right order for a
 *     task frame.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_request_construct_task_context() - fill in the SCU Task
 *    Context for any type of SATA request.  This is called from the
 *    various SATA constructors.
 * @ireq: the general IO request object to be used in constructing the
 *    SCU task context
 * @task_context: the buffer for the SCU task context being constructed
 *
 * Upon return, the general IO request construction and the command
 * buffer assignment are complete.  TODO: Revisit task context
 * construction to determine what is common for SSP/SMP/STP task
 * context structures.
 */
static void scu_sata_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

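	/*
	 * The H2D register FIS is five dwords (20 bytes); its first
	 * dword is carried in the TC body itself (type.words[0] below),
	 * so the command IU length excludes it.
	 */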
	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
					       ((char *) &ireq->stp.cmd) +
					       sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
						     bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/**
 * sci_stp_optimized_request_construct() - perform request construction
 *    common to all types of STP requests that are optimized by the
 *    silicon (i.e. UDMA, NCQ).
 * @ireq: the request to be constructed as an optimized request
 * @optimized_task_type: whether the request is to be a UDMA request (a
 *    value of 0) or an NCQ request (a value of 1)
 * @len: the total number of bytes to be transferred
 * @dir: the direction of data transfer for this request
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request
		 * task type values is consistent with the difference
		 * between FPDMA READ and FPDMA WRITE values.  Add that
		 * difference to the supplied task type parameter to set
		 * the task type properly for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited d2h fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			      u32 len,
			      enum dma_data_direction dir,
			      bool copy)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Request 0x%p received un-handled SAT "
			"management protocol 0x%x.\n",
			__func__, ireq, tmf->tmf_code);

		return SCI_FAILURE;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.class == ATA_DEV_ATAPI &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_FPDMAQ_READ,
						    len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_DMA_IN,
						    len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return sci_stp_pio_request_construct(ireq, copy);
}

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_SSP;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy = false;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_STP;

	copy = (task->data_dir != DMA_NONE);

	status = sci_io_request_construct_sata(ireq,
					       task->total_xfer_len,
					       task->data_dir,
					       copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* TODO: When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		/* Set to make sure no HW terminate posting is done: */
		set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		/* Fall through and change state to ABORTING... */
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		fallthrough;	/* and handle like ABORTING */
	case SCI_REQ_ABORTING:
		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
		else
			clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
		/* If the request is only waiting on the remote device
		 * suspension, return SUCCESS so the caller will wait too.
		 */
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n", __func__, ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%s)\n",
		      req_state_name(state)))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
					     ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
					     u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
			 __func__, event_code, req_state_name(state));

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @ireq: the request object for which to copy the response data
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

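	/*
	 * response_data_len arrives in SAS (big-endian) byte order;
	 * clamp the copy to the size of the response IU buffer.
	 */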
	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode
	 * 0x003C0000 to determine the SDMA status.
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO: With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

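		/* Per the SSP response IU DATAPRES field, 1 means
		 * response data is present and 2 means sense data is
		 * present; in either case the IU must be examined.
		 */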
1055*4882a593Smuzhiyun if (datapres == 1 || datapres == 2) {
1056*4882a593Smuzhiyun ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1057*4882a593Smuzhiyun ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1058*4882a593Smuzhiyun } else {
1059*4882a593Smuzhiyun ireq->scu_status = SCU_TASK_DONE_GOOD;
1060*4882a593Smuzhiyun ireq->sci_status = SCI_SUCCESS;
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun break;
1063*4882a593Smuzhiyun /* only stp device gets suspended. */
1064*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1065*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
1066*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
1067*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
1068*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
1069*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
1070*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1071*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
1072*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1073*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1074*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
1075*4882a593Smuzhiyun if (ireq->protocol == SAS_PROTOCOL_STP) {
1076*4882a593Smuzhiyun ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1077*4882a593Smuzhiyun SCU_COMPLETION_TL_STATUS_SHIFT;
1078*4882a593Smuzhiyun ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
1079*4882a593Smuzhiyun } else {
1080*4882a593Smuzhiyun ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1081*4882a593Smuzhiyun SCU_COMPLETION_TL_STATUS_SHIFT;
1082*4882a593Smuzhiyun ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1083*4882a593Smuzhiyun }
1084*4882a593Smuzhiyun break;
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun /* both stp/ssp device gets suspended */
1087*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
1088*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
1089*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
1090*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
1091*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
1092*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
1093*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1094*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1095*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1096*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1097*4882a593Smuzhiyun ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1098*4882a593Smuzhiyun SCU_COMPLETION_TL_STATUS_SHIFT;
1099*4882a593Smuzhiyun ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
1100*4882a593Smuzhiyun break;
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun /* neither ssp nor stp gets suspended. */
1103*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1104*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1105*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1106*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1107*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1108*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1109*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1110*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1111*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1112*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1113*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1114*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1115*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1116*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1117*4882a593Smuzhiyun case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1118*4882a593Smuzhiyun default:
1119*4882a593Smuzhiyun ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1120*4882a593Smuzhiyun SCU_COMPLETION_TL_STATUS_SHIFT;
1121*4882a593Smuzhiyun ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1122*4882a593Smuzhiyun break;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun /*
1126*4882a593Smuzhiyun * TODO: This is probably wrong for ACK/NAK timeout conditions
1127*4882a593Smuzhiyun */
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun /* In all cases we will treat this as the completion of the IO req. */
1130*4882a593Smuzhiyun sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1131*4882a593Smuzhiyun return SCI_SUCCESS;
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun
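/* Note on the completion-code plumbing shared by the tc_event handlers
 * below: SCU_GET_COMPLETION_TL_STATUS() masks out the task-level status
 * field of the completion code, and SCU_MAKE_COMPLETION_STATUS(x) builds
 * the value to compare it against, effectively
 * (x << SCU_COMPLETION_TL_STATUS_SHIFT).  That is why the error paths
 * above shift the extracted field back down before storing it in
 * ireq->scu_status: the comparison happens at the shifted position, but
 * scu_status holds the raw SCU_TASK_DONE_* value.
 * SCU_NORMALIZE_COMPLETION_STATUS() in the handlers below performs the
 * same extract-and-shift-down in one step.
 */
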
static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error wait for the task abort to
		 * complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to time out if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI
		 * expander, which sometimes is not able to send an SMP
		 * response within 2 ms.  This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses.  For this type of error,
		 * we ask the ihost user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

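/* Walk the request's scatter-gather list one scu_sgl_element at a time.
 * Each scu_sgl_element_pair holds two elements, A and B, plus a link to
 * the next pair; the iterator advances A -> B -> next pair's A and
 * returns NULL when it hits a zeroed element address or next-pair link,
 * which terminates the list.
 */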
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit a DATA_FIS from (current sgl + offset) for the input
 * parameter length.  The current sgl and offset are already stored in the
 * IO request.
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl + offset for the input length.
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}

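/* Push out the next chunk of a PIO data-out transfer.  If the remaining
 * byte count (pio_len) covers the rest of the current SGL element, the
 * whole remainder is sent and the iterator advances to the next element
 * with offset 0; otherwise only pio_len bytes are sent and the element's
 * address/offset are bumped so the transfer can resume mid-element.
 */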
static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}

/**
 * sci_stp_request_pio_data_in_copy_data_buffer()
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.
 *
 * Return: an enum sci_status code.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}

/**
 * sci_stp_request_pio_data_in_copy_data()
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the IO request data region.
 *
 * Return: an enum sci_status code.
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}

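/* Worked example of the chunking in sci_stp_request_pio_data_in_copy_data()
 * (illustrative numbers): with pio_len == 2560, the first two DATA frames
 * each take the else branch and copy SCU_MAX_FRAME_BUFFER_SIZE (1024)
 * bytes, leaving pio_len == 512; the third frame takes the short path and
 * copies just the 512-byte tail, after which pio_len reaches zero.
 */
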
static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if all the data is written the
			 * first time after the PIO setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO_SETUP or D2H Register fis. */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

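/* Common helper for the UDMA wait states: if the unsolicited frame is a
 * D2H Register FIS its contents are copied into ireq->stp.rsp for later
 * inspection, and in every case the frame buffer is handed back to the
 * controller via sci_controller_release_frame().
 */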
static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								  u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header,
						  frame_buffer);
	}

	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* Frame has been decoded return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

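/* Decode an ATAPI D2H Register FIS.  A failure to parse the frame, or an
 * ATA_ERR bit set in the received status, is normalized to
 * SCI_FAILURE_IO_RESPONSE_VALID so the caller sees a single "check the
 * response" outcome.
 */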
static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_FAILURE_IO_RESPONSE_VALID;
	} else {
		status = SCI_FAILURE_IO_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h ufi is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}

static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
	struct scu_task_context *task_context = ireq->tc;

	/* fill in the SCU Task Context for a DATA fis containing a CDB in Raw
	 * Frame type.  The TC for the previous Packet fis was already there;
	 * we only need to change the H2D fis content.
	 */
	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
	task_context->type.stp.fis_type = FIS_DATA;
	task_context->transfer_length_bytes = dev->cdb_len;
}

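/* Both ATAPI task-context constructors land the CDB at the same spot in
 * the raw H2D frame: above via (u8 *)&ireq->stp.cmd + sizeof(u32), and
 * below via &ireq->stp.cmd.lbal, which (assuming the usual FIS layout)
 * sits at that same byte offset 4 of struct host_to_dev_fis.
 */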
static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	struct sas_task *task = isci_request_access_task(ireq);
	struct scu_task_context *task_context = ireq->tc;
	int cdb_len = dev->cdb_len;

	/* reference: SSTL 1.13.4.2
	 * task_type, sata_direction
	 */
	if (task->data_dir == DMA_TO_DEVICE) {
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
		task_context->sata_direction = 0;
	} else {
		/* todo: for NO_DATA command, we need to send out raw frame. */
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
		task_context->sata_direction = 1;
	}

	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
	task_context->type.stp.fis_type = FIS_DATA;

	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);

	/* task phase is set to TX_CMD */
	task_context->task_phase = 0x1;

	/* retry counter */
	task_context->stp_retry_count = 0;

	/* data transfer size. */
	task_context->transfer_length_bytes = task->total_xfer_len;

	/* setup sgl */
	sci_request_build_sgl(ireq);
}

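/* Top-level unsolicited-frame dispatcher: how a received frame is handled
 * depends entirely on the request's current state.  One invariant holds
 * on every path below: once a frame has been decoded (or rejected) it
 * must be returned to the hardware with sci_controller_release_frame()
 * so the unsolicited frame buffer can be reused.
 */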
enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
			     u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct isci_stp_request *stp_req = &ireq->stp.req;
	enum sci_base_request_states state;
	enum sci_status status;
	ssize_t word_cnt;

	state = ireq->sm.current_state_id;
	switch (state) {
	case SCI_REQ_STARTED: {
		struct ssp_frame_hdr ssp_hdr;
		void *frame_header;

		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);

		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

		if (ssp_hdr.frame_type == SSP_RESPONSE) {
			struct ssp_response_iu *resp_iu;
			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&resp_iu);

			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);

			resp_iu = &ireq->ssp.rsp;

			if (resp_iu->datapres == 0x01 ||
			    resp_iu->datapres == 0x02) {
				ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
				ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
			} else {
				ireq->scu_status = SCU_TASK_DONE_GOOD;
				ireq->sci_status = SCI_SUCCESS;
			}
		} else {
			/* not a response frame, why did it get forwarded? */
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p received unexpected "
				"frame %d type 0x%02x\n", __func__, ireq,
				frame_index, ssp_hdr.frame_type);
		}

		/*
		 * In any case we are done with this frame buffer; return it
		 * to the controller.
		 */
		sci_controller_release_frame(ihost, frame_index);

		return SCI_SUCCESS;
	}

	case SCI_REQ_TASK_WAIT_TC_RESP:
		sci_io_request_copy_response(ireq);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		sci_controller_release_frame(ihost, frame_index);
		return SCI_SUCCESS;

	case SCI_REQ_SMP_WAIT_RESP: {
		struct sas_task *task = isci_request_access_task(ireq);
		struct scatterlist *sg = &task->smp_task.smp_resp;
		void *frame_header, *kaddr;
		u8 *rsp;

		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);
		kaddr = kmap_atomic(sg_page(sg));
		rsp = kaddr + sg->offset;
		sci_swab32_cpy(rsp, frame_header, 1);

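		/* The SCU splits an unsolicited SMP response: the first dword
		 * arrived in the frame header (already byte-swapped into
		 * rsp[0..3] above) and the remainder sits in the frame
		 * buffer, which is why the copy below starts at rsp + 4 with
		 * one dword fewer than the response length, clamped to the
		 * unsolicited frame buffer size.
		 */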
		if (rsp[0] == SMP_RESPONSE) {
			void *smp_resp;

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 &smp_resp);

			word_cnt = (sg->length / 4) - 1;
			if (word_cnt > 0)
				word_cnt = min_t(unsigned int, word_cnt,
						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE / 4);
			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);

			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
		} else {
			/*
			 * This was not a response frame why did it get
			 * forwarded?
			 */
			dev_err(&ihost->pdev->dev,
				"%s: SCIC SMP Request 0x%p received unexpected "
				"frame %d type 0x%02x\n",
				__func__, ireq, frame_index, rsp[0]);

			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		}
		kunmap_atomic(kaddr);

		sci_controller_release_frame(ihost, frame_index);

		return SCI_SUCCESS;
	}

	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
		return sci_stp_request_udma_general_frame_handler(ireq,
								  frame_index);

	case SCI_REQ_STP_UDMA_WAIT_D2H:
		/* Use the general frame handler to copy the response data */
		status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);

		if (status != SCI_SUCCESS)
			return status;

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;

	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);

			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_REGD2H:
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			/* The command has completed with error */
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			break;

		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: IO Request:0x%p Frame Id:%d protocol "
				 "violation occurred\n", __func__, stp_req,
				 frame_index);

			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
			break;
		}

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

		/* Frame has been decoded return it to the controller */
		sci_controller_release_frame(ihost, frame_index);

		return status;
	}

	case SCI_REQ_STP_PIO_WAIT_FRAME: {
		struct sas_task *task = isci_request_access_task(ireq);
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);
			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_PIO_SETUP:
			/* Get from the frame buffer the PIO Setup Data */
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			/* Get the data from the PIO Setup.  The SCU hardware
			 * returns the first word in the frame_header and the
			 * rest of the data is in the frame buffer, so we need
			 * to back up one dword.
			 */

			/* transfer_count: first 16 bits in the 4th dword */
			stp_req->pio_len = frame_buffer[3] & 0xffff;

			/* status: 4th byte in the 3rd dword */
			stp_req->status = (frame_buffer[2] >> 24) & 0xff;
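			/* Indexing note for the two extractions above:
			 * because the header consumed FIS dword 0,
			 * frame_buffer[n] holds FIS dword n + 1.  So
			 * frame_buffer[3] is the PIO Setup transfer-count
			 * dword and frame_buffer[2] bits 31:24 are the
			 * E_Status byte (per the SATA PIO Setup FIS layout).
			 */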

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			ireq->stp.rsp.status = stp_req->status;

			/* The next state is dependent on whether the
			 * request was PIO Data-in or Data out
			 */
			if (task->data_dir == DMA_FROM_DEVICE) {
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
			} else if (task->data_dir == DMA_TO_DEVICE) {
				/* Transmit data */
				status = sci_stp_request_pio_data_out_transmit_data(ireq);
				if (status != SCI_SUCCESS)
					break;
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
			}
			break;

		case FIS_SETDEVBITS:
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
			break;

		case FIS_REGD2H:
			if (frame_header->status & ATA_BUSY) {
				/*
				 * Now why is the drive sending a D2H Register
				 * FIS when it is still busy?  Do nothing since
				 * we are still in the right state.
				 */
				dev_dbg(&ihost->pdev->dev,
					"%s: SCIC PIO Request 0x%p received "
					"D2H Register FIS with BSY status "
					"0x%x\n",
					__func__, stp_req,
					frame_header->status);
				break;
			}

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
			break;

		default:
			/* FIXME: what do we do here? */
			break;
		}

		/* Frame is decoded return it to the controller */
		sci_controller_release_frame(ihost, frame_index);

		return status;
	}

	case SCI_REQ_STP_PIO_DATA_IN: {
		struct dev_to_host_fis *frame_header;
		struct sata_fis_data *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);
			return status;
		}

		if (frame_header->fis_type != FIS_DATA) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC PIO Request 0x%p received frame %d "
				"with fis type 0x%02x when expecting a data "
				"fis.\n",
				__func__, stp_req, frame_index,
				frame_header->fis_type);

			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

			/* Frame is decoded return it to the controller */
			sci_controller_release_frame(ihost, frame_index);
			return status;
		}

		if (stp_req->sgl.index < 0) {
			ireq->saved_rx_frame_index = frame_index;
			stp_req->pio_len = 0;
		} else {
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			status = sci_stp_request_pio_data_in_copy_data(stp_req,
								       (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			sci_controller_release_frame(ihost, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS || stp_req->pio_len != 0)
			return status;

		if ((stp_req->status & ATA_BUSY) == 0) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		} else {
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		return status;
	}

	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
		struct sas_task *task = isci_request_access_task(ireq);

		sci_controller_release_frame(ihost, frame_index);
		ireq->target_device->working_request = ireq;
		if (task->data_dir == DMA_NONE) {
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
			scu_atapi_reconstruct_raw_frame_task_context(ireq);
		} else {
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
			scu_atapi_construct_task_context(ireq);
		}

		sci_controller_continue_io(ireq);
		return SCI_SUCCESS;
	}
	case SCI_REQ_ATAPI_WAIT_D2H:
		return atapi_d2h_reg_frame_handler(ireq, frame_index);
	case SCI_REQ_ABORTING:
		/*
		 * TODO: Is it even possible to get an unsolicited frame in the
		 * aborting state?
		 */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_SUCCESS;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request given unexpected frame %x while "
			 "in state %d\n",
			 __func__, frame_index, state);

		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	}
}

static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/* We must check the response buffer to see if the D2H
		 * Register FIS was received before we got the TC
		 * completion.
		 */
		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
			sci_remote_device_suspend(ireq->target_device,
						  SCI_SW_SUSPEND_NORMAL);

			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		} else {
			/* If we have an error completion status for the
			 * TC then we can expect a D2H register FIS from
			 * the device so we must change state to wait
			 * for it
			 */
			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
		}
		break;

	/* TODO Check to see if any of these completion status need to
	 * wait for the device to host register fis.
	 */
	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
	 * - this comes only for B0
	 */
	default:
		/* All other completion status cause the IO to be complete. */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
					    enum sci_base_request_states next)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, next);
		break;
	default:
		/* All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

2180*4882a593Smuzhiyun static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2181*4882a593Smuzhiyun u32 completion_code)
2182*4882a593Smuzhiyun {
2183*4882a593Smuzhiyun struct isci_remote_device *idev = ireq->target_device;
2184*4882a593Smuzhiyun struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2185*4882a593Smuzhiyun enum sci_status status = SCI_SUCCESS;
2186*4882a593Smuzhiyun
2187*4882a593Smuzhiyun switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2188*4882a593Smuzhiyun case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2189*4882a593Smuzhiyun sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2190*4882a593Smuzhiyun break;
2191*4882a593Smuzhiyun
2192*4882a593Smuzhiyun case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2193*4882a593Smuzhiyun u16 len = sci_req_tx_bytes(ireq);
2194*4882a593Smuzhiyun
2195*4882a593Smuzhiyun /* likely a non-error data underrun; work around the
2196*4882a593Smuzhiyun * missing D2H frame from the controller
2197*4882a593Smuzhiyun */
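		/* A note on the synthesized values (our reading of the
		 * usual ATA/ATAPI conventions, not something this file
		 * documents): flags bit 6 is the FIS interrupt bit,
		 * status 0x50 is DRDY | DSC (device ready, no error),
		 * device 0xa0 is the legacy ATAPI device-select value,
		 * and sector_count 0x3 sets the I/O and C/D
		 * interrupt-reason bits, i.e. command complete.
		 */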
2198*4882a593Smuzhiyun if (d2h->fis_type != FIS_REGD2H) {
2199*4882a593Smuzhiyun d2h->fis_type = FIS_REGD2H;
2200*4882a593Smuzhiyun d2h->flags = (1 << 6);
2201*4882a593Smuzhiyun d2h->status = 0x50;
2202*4882a593Smuzhiyun d2h->error = 0;
2203*4882a593Smuzhiyun d2h->lbal = 0;
2204*4882a593Smuzhiyun d2h->byte_count_low = len & 0xff;
2205*4882a593Smuzhiyun d2h->byte_count_high = len >> 8;
2206*4882a593Smuzhiyun d2h->device = 0xa0;
2207*4882a593Smuzhiyun d2h->lbal_exp = 0;
2208*4882a593Smuzhiyun d2h->lbam_exp = 0;
2209*4882a593Smuzhiyun d2h->lbah_exp = 0;
2210*4882a593Smuzhiyun d2h->_r_a = 0;
2211*4882a593Smuzhiyun d2h->sector_count = 0x3;
2212*4882a593Smuzhiyun d2h->sector_count_exp = 0;
2213*4882a593Smuzhiyun d2h->_r_b = 0;
2214*4882a593Smuzhiyun d2h->_r_c = 0;
2215*4882a593Smuzhiyun d2h->_r_d = 0;
2216*4882a593Smuzhiyun }
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun ireq->scu_status = SCU_TASK_DONE_GOOD;
2219*4882a593Smuzhiyun ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2220*4882a593Smuzhiyun status = ireq->sci_status;
2221*4882a593Smuzhiyun
2222*4882a593Smuzhiyun /* the hw will have suspended the rnc, so complete the
2223*4882a593Smuzhiyun * request upon pending resume
2224*4882a593Smuzhiyun */
2225*4882a593Smuzhiyun sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2226*4882a593Smuzhiyun break;
2227*4882a593Smuzhiyun }
2228*4882a593Smuzhiyun case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2229*4882a593Smuzhiyun /* In this case, there is no UF coming after;
2230*4882a593Smuzhiyun * complete the IO now.
2231*4882a593Smuzhiyun */
2232*4882a593Smuzhiyun ireq->scu_status = SCU_TASK_DONE_GOOD;
2233*4882a593Smuzhiyun ireq->sci_status = SCI_SUCCESS;
2234*4882a593Smuzhiyun sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2235*4882a593Smuzhiyun break;
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun default:
2238*4882a593Smuzhiyun if (d2h->fis_type == FIS_REGD2H) {
2239*4882a593Smuzhiyun /* UF received change the device state to ATAPI_ERROR */
2240*4882a593Smuzhiyun status = ireq->sci_status;
2241*4882a593Smuzhiyun sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2242*4882a593Smuzhiyun } else {
2243*4882a593Smuzhiyun /* If we receive any non-success TC status and no UF
2244*4882a593Smuzhiyun * has been received yet, then a UF carrying the status FIS
2245*4882a593Smuzhiyun * is coming after (XXX: suspect this is
2246*4882a593Smuzhiyun * actually a protocol error or a bug like the
2247*4882a593Smuzhiyun * DONE_UNEXP_FIS case)
2248*4882a593Smuzhiyun */
2249*4882a593Smuzhiyun ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2250*4882a593Smuzhiyun ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun break;
2255*4882a593Smuzhiyun }
2256*4882a593Smuzhiyun
2257*4882a593Smuzhiyun return status;
2258*4882a593Smuzhiyun }
2259*4882a593Smuzhiyun
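/* The helpers below classify SCU completion codes by protocol, according
 * to whether the completion requires a Tx-only or a Tx/Rx suspension of
 * the remote node before the request can be cleaned up.
 */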
2260*4882a593Smuzhiyun static int sci_request_smp_completion_status_is_tx_suspend(
2261*4882a593Smuzhiyun unsigned int completion_status)
2262*4882a593Smuzhiyun {
2263*4882a593Smuzhiyun switch (completion_status) {
2264*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2265*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2266*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2267*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2268*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2269*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2270*4882a593Smuzhiyun return 1;
2271*4882a593Smuzhiyun }
2272*4882a593Smuzhiyun return 0;
2273*4882a593Smuzhiyun }
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun static int sci_request_smp_completion_status_is_tx_rx_suspend(
2276*4882a593Smuzhiyun unsigned int completion_status)
2277*4882a593Smuzhiyun {
2278*4882a593Smuzhiyun return 0; /* There are no Tx/Rx SMP suspend conditions. */
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun static int sci_request_ssp_completion_status_is_tx_suspend(
2282*4882a593Smuzhiyun unsigned int completion_status)
2283*4882a593Smuzhiyun {
2284*4882a593Smuzhiyun switch (completion_status) {
2285*4882a593Smuzhiyun case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2286*4882a593Smuzhiyun case SCU_TASK_DONE_LF_ERR:
2287*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2288*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2289*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2290*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2291*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2292*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2293*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2294*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2295*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2296*4882a593Smuzhiyun return 1;
2297*4882a593Smuzhiyun }
2298*4882a593Smuzhiyun return 0;
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun
2301*4882a593Smuzhiyun static int sci_request_ssp_completion_status_is_tx_rx_suspend(
2302*4882a593Smuzhiyun unsigned int completion_status)
2303*4882a593Smuzhiyun {
2304*4882a593Smuzhiyun return 0; /* There are no Tx/Rx SSP suspend conditions. */
2305*4882a593Smuzhiyun }
2306*4882a593Smuzhiyun
2307*4882a593Smuzhiyun static int sci_request_stpsata_completion_status_is_tx_suspend(
2308*4882a593Smuzhiyun unsigned int completion_status)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun switch (completion_status) {
2311*4882a593Smuzhiyun case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2312*4882a593Smuzhiyun case SCU_TASK_DONE_LL_R_ERR:
2313*4882a593Smuzhiyun case SCU_TASK_DONE_LL_PERR:
2314*4882a593Smuzhiyun case SCU_TASK_DONE_REG_ERR:
2315*4882a593Smuzhiyun case SCU_TASK_DONE_SDB_ERR:
2316*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2317*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2318*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2319*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2320*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2321*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2322*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2323*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2324*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2325*4882a593Smuzhiyun return 1;
2326*4882a593Smuzhiyun }
2327*4882a593Smuzhiyun return 0;
2328*4882a593Smuzhiyun }
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun
2331*4882a593Smuzhiyun static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
2332*4882a593Smuzhiyun unsigned int completion_status)
2333*4882a593Smuzhiyun {
2334*4882a593Smuzhiyun switch (completion_status) {
2335*4882a593Smuzhiyun case SCU_TASK_DONE_LF_ERR:
2336*4882a593Smuzhiyun case SCU_TASK_DONE_LL_SY_TERM:
2337*4882a593Smuzhiyun case SCU_TASK_DONE_LL_LF_TERM:
2338*4882a593Smuzhiyun case SCU_TASK_DONE_BREAK_RCVD:
2339*4882a593Smuzhiyun case SCU_TASK_DONE_INV_FIS_LEN:
2340*4882a593Smuzhiyun case SCU_TASK_DONE_UNEXP_FIS:
2341*4882a593Smuzhiyun case SCU_TASK_DONE_UNEXP_SDBFIS:
2342*4882a593Smuzhiyun case SCU_TASK_DONE_MAX_PLD_ERR:
2343*4882a593Smuzhiyun return 1;
2344*4882a593Smuzhiyun }
2345*4882a593Smuzhiyun return 0;
2346*4882a593Smuzhiyun }
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun static void sci_request_handle_suspending_completions(
2349*4882a593Smuzhiyun struct isci_request *ireq,
2350*4882a593Smuzhiyun u32 completion_code)
2351*4882a593Smuzhiyun {
2352*4882a593Smuzhiyun int is_tx = 0;
2353*4882a593Smuzhiyun int is_tx_rx = 0;
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun switch (ireq->protocol) {
2356*4882a593Smuzhiyun case SAS_PROTOCOL_SMP:
2357*4882a593Smuzhiyun is_tx = sci_request_smp_completion_status_is_tx_suspend(
2358*4882a593Smuzhiyun completion_code);
2359*4882a593Smuzhiyun is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
2360*4882a593Smuzhiyun completion_code);
2361*4882a593Smuzhiyun break;
2362*4882a593Smuzhiyun case SAS_PROTOCOL_SSP:
2363*4882a593Smuzhiyun is_tx = sci_request_ssp_completion_status_is_tx_suspend(
2364*4882a593Smuzhiyun completion_code);
2365*4882a593Smuzhiyun is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
2366*4882a593Smuzhiyun completion_code);
2367*4882a593Smuzhiyun break;
2368*4882a593Smuzhiyun case SAS_PROTOCOL_STP:
2369*4882a593Smuzhiyun is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
2370*4882a593Smuzhiyun completion_code);
2371*4882a593Smuzhiyun is_tx_rx =
2372*4882a593Smuzhiyun sci_request_stpsata_completion_status_is_tx_rx_suspend(
2373*4882a593Smuzhiyun completion_code);
2374*4882a593Smuzhiyun break;
2375*4882a593Smuzhiyun default:
2376*4882a593Smuzhiyun dev_warn(&ireq->isci_host->pdev->dev,
2377*4882a593Smuzhiyun "%s: request %p has no valid protocol\n",
2378*4882a593Smuzhiyun __func__, ireq);
2379*4882a593Smuzhiyun break;
2380*4882a593Smuzhiyun }
2381*4882a593Smuzhiyun if (is_tx || is_tx_rx) {
2382*4882a593Smuzhiyun BUG_ON(is_tx && is_tx_rx);
2383*4882a593Smuzhiyun
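		/* Suspend the remote node context; the event code
		 * selects a Tx-only or a Tx/Rx suspension to match the
		 * classification above.
		 */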
2384*4882a593Smuzhiyun sci_remote_node_context_suspend(
2385*4882a593Smuzhiyun &ireq->target_device->rnc,
2386*4882a593Smuzhiyun SCI_HW_SUSPEND,
2387*4882a593Smuzhiyun (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
2388*4882a593Smuzhiyun : SCU_EVENT_TL_RNC_SUSPEND_TX);
2389*4882a593Smuzhiyun }
2390*4882a593Smuzhiyun }
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun enum sci_status
2393*4882a593Smuzhiyun sci_io_request_tc_completion(struct isci_request *ireq,
2394*4882a593Smuzhiyun u32 completion_code)
2395*4882a593Smuzhiyun {
2396*4882a593Smuzhiyun enum sci_base_request_states state;
2397*4882a593Smuzhiyun struct isci_host *ihost = ireq->owning_controller;
2398*4882a593Smuzhiyun
2399*4882a593Smuzhiyun state = ireq->sm.current_state_id;
2400*4882a593Smuzhiyun
2401*4882a593Smuzhiyun /* Decode those completions that signal upcoming suspension events. */
2402*4882a593Smuzhiyun sci_request_handle_suspending_completions(
2403*4882a593Smuzhiyun ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun switch (state) {
2406*4882a593Smuzhiyun case SCI_REQ_STARTED:
2407*4882a593Smuzhiyun return request_started_state_tc_event(ireq, completion_code);
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun case SCI_REQ_TASK_WAIT_TC_COMP:
2410*4882a593Smuzhiyun return ssp_task_request_await_tc_event(ireq,
2411*4882a593Smuzhiyun completion_code);
2412*4882a593Smuzhiyun
2413*4882a593Smuzhiyun case SCI_REQ_SMP_WAIT_RESP:
2414*4882a593Smuzhiyun return smp_request_await_response_tc_event(ireq,
2415*4882a593Smuzhiyun completion_code);
2416*4882a593Smuzhiyun
2417*4882a593Smuzhiyun case SCI_REQ_SMP_WAIT_TC_COMP:
2418*4882a593Smuzhiyun return smp_request_await_tc_event(ireq, completion_code);
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2421*4882a593Smuzhiyun return stp_request_udma_await_tc_event(ireq,
2422*4882a593Smuzhiyun completion_code);
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2425*4882a593Smuzhiyun return stp_request_non_data_await_h2d_tc_event(ireq,
2426*4882a593Smuzhiyun completion_code);
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun case SCI_REQ_STP_PIO_WAIT_H2D:
2429*4882a593Smuzhiyun return stp_request_pio_await_h2d_completion_tc_event(ireq,
2430*4882a593Smuzhiyun completion_code);
2431*4882a593Smuzhiyun
2432*4882a593Smuzhiyun case SCI_REQ_STP_PIO_DATA_OUT:
2433*4882a593Smuzhiyun return pio_data_out_tx_done_tc_event(ireq, completion_code);
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun case SCI_REQ_ABORTING:
2436*4882a593Smuzhiyun return request_aborting_state_tc_event(ireq,
2437*4882a593Smuzhiyun completion_code);
2438*4882a593Smuzhiyun
2439*4882a593Smuzhiyun case SCI_REQ_ATAPI_WAIT_H2D:
2440*4882a593Smuzhiyun return atapi_raw_completion(ireq, completion_code,
2441*4882a593Smuzhiyun SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2442*4882a593Smuzhiyun
2443*4882a593Smuzhiyun case SCI_REQ_ATAPI_WAIT_TC_COMP:
2444*4882a593Smuzhiyun return atapi_raw_completion(ireq, completion_code,
2445*4882a593Smuzhiyun SCI_REQ_ATAPI_WAIT_D2H);
2446*4882a593Smuzhiyun
2447*4882a593Smuzhiyun case SCI_REQ_ATAPI_WAIT_D2H:
2448*4882a593Smuzhiyun return atapi_data_tc_completion_handler(ireq, completion_code);
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun default:
2451*4882a593Smuzhiyun dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
2452*4882a593Smuzhiyun __func__, completion_code, req_state_name(state));
2453*4882a593Smuzhiyun return SCI_FAILURE_INVALID_STATE;
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun }
2456*4882a593Smuzhiyun
2457*4882a593Smuzhiyun /**
2458*4882a593Smuzhiyun * isci_request_process_response_iu() - This function sets the status and
2459*4882a593Smuzhiyun * response IU in the task struct, from the request object, for the upper
2460*4882a593Smuzhiyun * layer driver.
2461*4882a593Smuzhiyun * @task: This parameter is the task struct from the upper layer driver.
2462*4882a593Smuzhiyun * @resp_iu: This parameter points to the response iu of the completed request.
2463*4882a593Smuzhiyun * @dev: This parameter specifies the linux device struct.
2464*4882a593Smuzhiyun *
2465*4882a593Smuzhiyun * none.
2466*4882a593Smuzhiyun */
2467*4882a593Smuzhiyun static void isci_request_process_response_iu(
2468*4882a593Smuzhiyun struct sas_task *task,
2469*4882a593Smuzhiyun struct ssp_response_iu *resp_iu,
2470*4882a593Smuzhiyun struct device *dev)
2471*4882a593Smuzhiyun {
2472*4882a593Smuzhiyun dev_dbg(dev,
2473*4882a593Smuzhiyun "%s: resp_iu = %p "
2474*4882a593Smuzhiyun "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2475*4882a593Smuzhiyun "resp_iu->response_data_len = %x, "
2476*4882a593Smuzhiyun "resp_iu->sense_data_len = %x\nresponse data: ",
2477*4882a593Smuzhiyun __func__,
2478*4882a593Smuzhiyun resp_iu,
2479*4882a593Smuzhiyun resp_iu->status,
2480*4882a593Smuzhiyun resp_iu->datapres,
2481*4882a593Smuzhiyun resp_iu->response_data_len,
2482*4882a593Smuzhiyun resp_iu->sense_data_len);
2483*4882a593Smuzhiyun
2484*4882a593Smuzhiyun task->task_status.stat = resp_iu->status;
2485*4882a593Smuzhiyun
2486*4882a593Smuzhiyun /* libsas updates the task status fields based on the response iu. */
2487*4882a593Smuzhiyun sas_ssp_task_response(dev, task, resp_iu);
2488*4882a593Smuzhiyun }
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun /**
2491*4882a593Smuzhiyun * isci_request_set_open_reject_status() - This function prepares the I/O
2492*4882a593Smuzhiyun * completion for OPEN_REJECT conditions.
2493*4882a593Smuzhiyun * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the task struct from the upper layer driver.
2494*4882a593Smuzhiyun * @response_ptr: This parameter specifies the service response for the I/O.
2495*4882a593Smuzhiyun * @status_ptr: This parameter specifies the exec status for the I/O.
2496*4882a593Smuzhiyun * @open_rej_reason: This parameter specifies the encoded reason for the
2497*4882a593Smuzhiyun * abandon-class reject.
2498*4882a593Smuzhiyun *
2499*4882a593Smuzhiyun * none.
2500*4882a593Smuzhiyun */
2501*4882a593Smuzhiyun static void isci_request_set_open_reject_status(
2502*4882a593Smuzhiyun struct isci_request *request,
2503*4882a593Smuzhiyun struct sas_task *task,
2504*4882a593Smuzhiyun enum service_response *response_ptr,
2505*4882a593Smuzhiyun enum exec_status *status_ptr,
2506*4882a593Smuzhiyun enum sas_open_rej_reason open_rej_reason)
2507*4882a593Smuzhiyun {
2508*4882a593Smuzhiyun /* Task in the target is done. */
2509*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2510*4882a593Smuzhiyun *response_ptr = SAS_TASK_UNDELIVERED;
2511*4882a593Smuzhiyun *status_ptr = SAS_OPEN_REJECT;
2512*4882a593Smuzhiyun task->task_status.open_rej_reason = open_rej_reason;
2513*4882a593Smuzhiyun }
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun /**
2516*4882a593Smuzhiyun * isci_request_handle_controller_specific_errors() - This function decodes
2517*4882a593Smuzhiyun * controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device to which the request was issued.
2518*4882a593Smuzhiyun * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the task struct from the upper layer driver.
2519*4882a593Smuzhiyun * @response_ptr: This parameter specifies the service response for the I/O.
2520*4882a593Smuzhiyun * @status_ptr: This parameter specifies the exec status for the I/O.
2521*4882a593Smuzhiyun *
2522*4882a593Smuzhiyun * none.
2523*4882a593Smuzhiyun */
2524*4882a593Smuzhiyun static void isci_request_handle_controller_specific_errors(
2525*4882a593Smuzhiyun struct isci_remote_device *idev,
2526*4882a593Smuzhiyun struct isci_request *request,
2527*4882a593Smuzhiyun struct sas_task *task,
2528*4882a593Smuzhiyun enum service_response *response_ptr,
2529*4882a593Smuzhiyun enum exec_status *status_ptr)
2530*4882a593Smuzhiyun {
2531*4882a593Smuzhiyun unsigned int cstatus;
2532*4882a593Smuzhiyun
2533*4882a593Smuzhiyun cstatus = request->scu_status;
2534*4882a593Smuzhiyun
2535*4882a593Smuzhiyun dev_dbg(&request->isci_host->pdev->dev,
2536*4882a593Smuzhiyun "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2537*4882a593Smuzhiyun "- controller status = 0x%x\n",
2538*4882a593Smuzhiyun __func__, request, cstatus);
2539*4882a593Smuzhiyun
2540*4882a593Smuzhiyun /* Decode the controller-specific errors; most
2541*4882a593Smuzhiyun * important is to recognize those conditions in which
2542*4882a593Smuzhiyun * the target may still have a task outstanding that
2543*4882a593Smuzhiyun * must be aborted.
2544*4882a593Smuzhiyun *
2545*4882a593Smuzhiyun * Note that there are SCU completion codes being
2546*4882a593Smuzhiyun * named in the decode below for which SCIC has already
2547*4882a593Smuzhiyun * done work to handle them in a way other than as
2548*4882a593Smuzhiyun * a controller-specific completion code; these are left
2549*4882a593Smuzhiyun * in the decode below for completeness' sake.
2550*4882a593Smuzhiyun */
2551*4882a593Smuzhiyun switch (cstatus) {
2552*4882a593Smuzhiyun case SCU_TASK_DONE_DMASETUP_DIRERR:
2553*4882a593Smuzhiyun /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2554*4882a593Smuzhiyun case SCU_TASK_DONE_XFERCNT_ERR:
2555*4882a593Smuzhiyun /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2556*4882a593Smuzhiyun if (task->task_proto == SAS_PROTOCOL_SMP) {
2557*4882a593Smuzhiyun /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2558*4882a593Smuzhiyun *response_ptr = SAS_TASK_COMPLETE;
2559*4882a593Smuzhiyun
2560*4882a593Smuzhiyun /* See if the device has been/is being stopped. Note
2561*4882a593Smuzhiyun * that we ignore the quiesce state, since we are
2562*4882a593Smuzhiyun * concerned about the actual device state.
2563*4882a593Smuzhiyun */
2564*4882a593Smuzhiyun if (!idev)
2565*4882a593Smuzhiyun *status_ptr = SAS_DEVICE_UNKNOWN;
2566*4882a593Smuzhiyun else
2567*4882a593Smuzhiyun *status_ptr = SAS_ABORTED_TASK;
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2570*4882a593Smuzhiyun } else {
2571*4882a593Smuzhiyun /* Task in the target is not done. */
2572*4882a593Smuzhiyun *response_ptr = SAS_TASK_UNDELIVERED;
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun if (!idev)
2575*4882a593Smuzhiyun *status_ptr = SAS_DEVICE_UNKNOWN;
2576*4882a593Smuzhiyun else
2577*4882a593Smuzhiyun *status_ptr = SAM_STAT_TASK_ABORTED;
2578*4882a593Smuzhiyun
2579*4882a593Smuzhiyun clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2580*4882a593Smuzhiyun }
2581*4882a593Smuzhiyun
2582*4882a593Smuzhiyun break;
2583*4882a593Smuzhiyun
2584*4882a593Smuzhiyun case SCU_TASK_DONE_CRC_ERR:
2585*4882a593Smuzhiyun case SCU_TASK_DONE_NAK_CMD_ERR:
2586*4882a593Smuzhiyun case SCU_TASK_DONE_EXCESS_DATA:
2587*4882a593Smuzhiyun case SCU_TASK_DONE_UNEXP_FIS:
2588*4882a593Smuzhiyun /* Also SCU_TASK_DONE_UNEXP_RESP: */
2589*4882a593Smuzhiyun case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2590*4882a593Smuzhiyun case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2591*4882a593Smuzhiyun case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2592*4882a593Smuzhiyun /* These are conditions in which the target
2593*4882a593Smuzhiyun * has completed the task, so that no cleanup
2594*4882a593Smuzhiyun * is necessary.
2595*4882a593Smuzhiyun */
2596*4882a593Smuzhiyun *response_ptr = SAS_TASK_COMPLETE;
2597*4882a593Smuzhiyun
2598*4882a593Smuzhiyun /* See if the device has been/is being stopped. Note
2599*4882a593Smuzhiyun * that we ignore the quiesce state, since we are
2600*4882a593Smuzhiyun * concerned about the actual device state.
2601*4882a593Smuzhiyun */
2602*4882a593Smuzhiyun if (!idev)
2603*4882a593Smuzhiyun *status_ptr = SAS_DEVICE_UNKNOWN;
2604*4882a593Smuzhiyun else
2605*4882a593Smuzhiyun *status_ptr = SAS_ABORTED_TASK;
2606*4882a593Smuzhiyun
2607*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2608*4882a593Smuzhiyun break;
2609*4882a593Smuzhiyun
2610*4882a593Smuzhiyun
2611*4882a593Smuzhiyun /* Note that the only open reject completion codes seen here will be
2612*4882a593Smuzhiyun * abandon-class codes; all others are automatically retried in the SCU.
2613*4882a593Smuzhiyun */
2614*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2615*4882a593Smuzhiyun
2616*4882a593Smuzhiyun isci_request_set_open_reject_status(
2617*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2618*4882a593Smuzhiyun SAS_OREJ_WRONG_DEST);
2619*4882a593Smuzhiyun break;
2620*4882a593Smuzhiyun
2621*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun /* Note - the return of AB0 will change when
2624*4882a593Smuzhiyun * libsas implements detection of zone violations.
2625*4882a593Smuzhiyun */
2626*4882a593Smuzhiyun isci_request_set_open_reject_status(
2627*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2628*4882a593Smuzhiyun SAS_OREJ_RESV_AB0);
2629*4882a593Smuzhiyun break;
2630*4882a593Smuzhiyun
2631*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun isci_request_set_open_reject_status(
2634*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2635*4882a593Smuzhiyun SAS_OREJ_RESV_AB1);
2636*4882a593Smuzhiyun break;
2637*4882a593Smuzhiyun
2638*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun isci_request_set_open_reject_status(
2641*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2642*4882a593Smuzhiyun SAS_OREJ_RESV_AB2);
2643*4882a593Smuzhiyun break;
2644*4882a593Smuzhiyun
2645*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun isci_request_set_open_reject_status(
2648*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2649*4882a593Smuzhiyun SAS_OREJ_RESV_AB3);
2650*4882a593Smuzhiyun break;
2651*4882a593Smuzhiyun
2652*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun isci_request_set_open_reject_status(
2655*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2656*4882a593Smuzhiyun SAS_OREJ_BAD_DEST);
2657*4882a593Smuzhiyun break;
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2660*4882a593Smuzhiyun
2661*4882a593Smuzhiyun isci_request_set_open_reject_status(
2662*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2663*4882a593Smuzhiyun SAS_OREJ_STP_NORES);
2664*4882a593Smuzhiyun break;
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun isci_request_set_open_reject_status(
2669*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2670*4882a593Smuzhiyun SAS_OREJ_EPROTO);
2671*4882a593Smuzhiyun break;
2672*4882a593Smuzhiyun
2673*4882a593Smuzhiyun case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun isci_request_set_open_reject_status(
2676*4882a593Smuzhiyun request, task, response_ptr, status_ptr,
2677*4882a593Smuzhiyun SAS_OREJ_CONN_RATE);
2678*4882a593Smuzhiyun break;
2679*4882a593Smuzhiyun
2680*4882a593Smuzhiyun case SCU_TASK_DONE_LL_R_ERR:
2681*4882a593Smuzhiyun /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2682*4882a593Smuzhiyun case SCU_TASK_DONE_LL_PERR:
2683*4882a593Smuzhiyun case SCU_TASK_DONE_LL_SY_TERM:
2684*4882a593Smuzhiyun /* Also SCU_TASK_DONE_NAK_ERR:*/
2685*4882a593Smuzhiyun case SCU_TASK_DONE_LL_LF_TERM:
2686*4882a593Smuzhiyun /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2687*4882a593Smuzhiyun case SCU_TASK_DONE_LL_ABORT_ERR:
2688*4882a593Smuzhiyun case SCU_TASK_DONE_SEQ_INV_TYPE:
2689*4882a593Smuzhiyun /* Also SCU_TASK_DONE_UNEXP_XR: */
2690*4882a593Smuzhiyun case SCU_TASK_DONE_XR_IU_LEN_ERR:
2691*4882a593Smuzhiyun case SCU_TASK_DONE_INV_FIS_LEN:
2692*4882a593Smuzhiyun /* Also SCU_TASK_DONE_XR_WD_LEN: */
2693*4882a593Smuzhiyun case SCU_TASK_DONE_SDMA_ERR:
2694*4882a593Smuzhiyun case SCU_TASK_DONE_OFFSET_ERR:
2695*4882a593Smuzhiyun case SCU_TASK_DONE_MAX_PLD_ERR:
2696*4882a593Smuzhiyun case SCU_TASK_DONE_LF_ERR:
2697*4882a593Smuzhiyun case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2698*4882a593Smuzhiyun case SCU_TASK_DONE_SMP_LL_RX_ERR:
2699*4882a593Smuzhiyun case SCU_TASK_DONE_UNEXP_DATA:
2700*4882a593Smuzhiyun case SCU_TASK_DONE_UNEXP_SDBFIS:
2701*4882a593Smuzhiyun case SCU_TASK_DONE_REG_ERR:
2702*4882a593Smuzhiyun case SCU_TASK_DONE_SDB_ERR:
2703*4882a593Smuzhiyun case SCU_TASK_DONE_TASK_ABORT:
2704*4882a593Smuzhiyun default:
2705*4882a593Smuzhiyun /* Task in the target is not done. */
2706*4882a593Smuzhiyun *response_ptr = SAS_TASK_UNDELIVERED;
2707*4882a593Smuzhiyun *status_ptr = SAM_STAT_TASK_ABORTED;
2708*4882a593Smuzhiyun
2709*4882a593Smuzhiyun if (task->task_proto == SAS_PROTOCOL_SMP)
2710*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2711*4882a593Smuzhiyun else
2712*4882a593Smuzhiyun clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2713*4882a593Smuzhiyun break;
2714*4882a593Smuzhiyun }
2715*4882a593Smuzhiyun }
2716*4882a593Smuzhiyun
2717*4882a593Smuzhiyun static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2718*4882a593Smuzhiyun {
2719*4882a593Smuzhiyun struct task_status_struct *ts = &task->task_status;
2720*4882a593Smuzhiyun struct ata_task_resp *resp = (void *)&ts->buf[0];
2721*4882a593Smuzhiyun
2722*4882a593Smuzhiyun resp->frame_len = sizeof(*fis);
2723*4882a593Smuzhiyun memcpy(resp->ending_fis, fis, sizeof(*fis));
2724*4882a593Smuzhiyun ts->buf_valid_size = sizeof(*resp);
2725*4882a593Smuzhiyun
2726*4882a593Smuzhiyun /* If an error is flagged let libata decode the fis */
2727*4882a593Smuzhiyun if (ac_err_mask(fis->status))
2728*4882a593Smuzhiyun ts->stat = SAS_PROTO_RESPONSE;
2729*4882a593Smuzhiyun else
2730*4882a593Smuzhiyun ts->stat = SAM_STAT_GOOD;
2731*4882a593Smuzhiyun
2732*4882a593Smuzhiyun ts->resp = SAS_TASK_COMPLETE;
2733*4882a593Smuzhiyun }
2734*4882a593Smuzhiyun
2735*4882a593Smuzhiyun static void isci_request_io_request_complete(struct isci_host *ihost,
2736*4882a593Smuzhiyun struct isci_request *request,
2737*4882a593Smuzhiyun enum sci_io_status completion_status)
2738*4882a593Smuzhiyun {
2739*4882a593Smuzhiyun struct sas_task *task = isci_request_access_task(request);
2740*4882a593Smuzhiyun struct ssp_response_iu *resp_iu;
2741*4882a593Smuzhiyun unsigned long task_flags;
2742*4882a593Smuzhiyun struct isci_remote_device *idev = request->target_device;
2743*4882a593Smuzhiyun enum service_response response = SAS_TASK_UNDELIVERED;
2744*4882a593Smuzhiyun enum exec_status status = SAS_ABORTED_TASK;
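	/* Default to an undelivered/aborted disposition; the switch
	 * below refines response and status for each completion type.
	 */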
2745*4882a593Smuzhiyun
2746*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
2747*4882a593Smuzhiyun "%s: request = %p, task = %p, "
2748*4882a593Smuzhiyun "task->data_dir = %d completion_status = 0x%x\n",
2749*4882a593Smuzhiyun __func__, request, task, task->data_dir, completion_status);
2750*4882a593Smuzhiyun
2751*4882a593Smuzhiyun /* The request is done from an SCU HW perspective. */
2752*4882a593Smuzhiyun
2753*4882a593Smuzhiyun /* This is an active request being completed from the core. */
2754*4882a593Smuzhiyun switch (completion_status) {
2755*4882a593Smuzhiyun
2756*4882a593Smuzhiyun case SCI_IO_FAILURE_RESPONSE_VALID:
2757*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
2758*4882a593Smuzhiyun "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2759*4882a593Smuzhiyun __func__, request, task);
2760*4882a593Smuzhiyun
2761*4882a593Smuzhiyun if (sas_protocol_ata(task->task_proto)) {
2762*4882a593Smuzhiyun isci_process_stp_response(task, &request->stp.rsp);
2763*4882a593Smuzhiyun } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2764*4882a593Smuzhiyun
2765*4882a593Smuzhiyun /* crack the iu response buffer. */
2766*4882a593Smuzhiyun resp_iu = &request->ssp.rsp;
2767*4882a593Smuzhiyun isci_request_process_response_iu(task, resp_iu,
2768*4882a593Smuzhiyun &ihost->pdev->dev);
2769*4882a593Smuzhiyun
2770*4882a593Smuzhiyun } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2771*4882a593Smuzhiyun
2772*4882a593Smuzhiyun dev_err(&ihost->pdev->dev,
2773*4882a593Smuzhiyun "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2774*4882a593Smuzhiyun "SAS_PROTOCOL_SMP protocol\n",
2775*4882a593Smuzhiyun __func__);
2776*4882a593Smuzhiyun
2777*4882a593Smuzhiyun } else
2778*4882a593Smuzhiyun dev_err(&ihost->pdev->dev,
2779*4882a593Smuzhiyun "%s: unknown protocol\n", __func__);
2780*4882a593Smuzhiyun
2781*4882a593Smuzhiyun /* use the task status set in the task struct by the
2782*4882a593Smuzhiyun * response-processing call above.
2783*4882a593Smuzhiyun */
2784*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2785*4882a593Smuzhiyun response = task->task_status.resp;
2786*4882a593Smuzhiyun status = task->task_status.stat;
2787*4882a593Smuzhiyun break;
2788*4882a593Smuzhiyun
2789*4882a593Smuzhiyun case SCI_IO_SUCCESS:
2790*4882a593Smuzhiyun case SCI_IO_SUCCESS_IO_DONE_EARLY:
2791*4882a593Smuzhiyun
2792*4882a593Smuzhiyun response = SAS_TASK_COMPLETE;
2793*4882a593Smuzhiyun status = SAM_STAT_GOOD;
2794*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2795*4882a593Smuzhiyun
2796*4882a593Smuzhiyun if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2797*4882a593Smuzhiyun
2798*4882a593Smuzhiyun /* This was an SSP / STP / SATA transfer.
2799*4882a593Smuzhiyun * There is a possibility that less data than
2800*4882a593Smuzhiyun * the maximum was transferred.
2801*4882a593Smuzhiyun */
2802*4882a593Smuzhiyun u32 transferred_length = sci_req_tx_bytes(request);
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun task->task_status.residual
2805*4882a593Smuzhiyun = task->total_xfer_len - transferred_length;
2806*4882a593Smuzhiyun
2807*4882a593Smuzhiyun /* If there were residual bytes, call this an
2808*4882a593Smuzhiyun * underrun.
2809*4882a593Smuzhiyun */
2810*4882a593Smuzhiyun if (task->task_status.residual != 0)
2811*4882a593Smuzhiyun status = SAS_DATA_UNDERRUN;
2812*4882a593Smuzhiyun
2813*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
2814*4882a593Smuzhiyun "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2815*4882a593Smuzhiyun __func__, status);
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun } else
2818*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
2819*4882a593Smuzhiyun __func__);
2820*4882a593Smuzhiyun break;
2821*4882a593Smuzhiyun
2822*4882a593Smuzhiyun case SCI_IO_FAILURE_TERMINATED:
2823*4882a593Smuzhiyun
2824*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
2825*4882a593Smuzhiyun "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2826*4882a593Smuzhiyun __func__, request, task);
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun /* The request was terminated explicitly. */
2829*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2830*4882a593Smuzhiyun response = SAS_TASK_UNDELIVERED;
2831*4882a593Smuzhiyun
2832*4882a593Smuzhiyun /* See if the device has been/is being stopped. Note
2833*4882a593Smuzhiyun * that we ignore the quiesce state, since we are
2834*4882a593Smuzhiyun * concerned about the actual device state.
2835*4882a593Smuzhiyun */
2836*4882a593Smuzhiyun if (!idev)
2837*4882a593Smuzhiyun status = SAS_DEVICE_UNKNOWN;
2838*4882a593Smuzhiyun else
2839*4882a593Smuzhiyun status = SAS_ABORTED_TASK;
2840*4882a593Smuzhiyun break;
2841*4882a593Smuzhiyun
2842*4882a593Smuzhiyun case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2843*4882a593Smuzhiyun
2844*4882a593Smuzhiyun isci_request_handle_controller_specific_errors(idev, request,
2845*4882a593Smuzhiyun task, &response,
2846*4882a593Smuzhiyun &status);
2847*4882a593Smuzhiyun break;
2848*4882a593Smuzhiyun
2849*4882a593Smuzhiyun case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2850*4882a593Smuzhiyun /* This is a special case, in that the I/O completion
2851*4882a593Smuzhiyun * is telling us that the device needs a reset.
2852*4882a593Smuzhiyun * In order for the device reset condition to be
2853*4882a593Smuzhiyun * noticed, the I/O has to be handled in the error
2854*4882a593Smuzhiyun * handler. Set the reset flag and cause the
2855*4882a593Smuzhiyun * SCSI error thread to be scheduled.
2856*4882a593Smuzhiyun */
2857*4882a593Smuzhiyun spin_lock_irqsave(&task->task_state_lock, task_flags);
2858*4882a593Smuzhiyun task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2859*4882a593Smuzhiyun spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2860*4882a593Smuzhiyun
2861*4882a593Smuzhiyun /* Fail the I/O. */
2862*4882a593Smuzhiyun response = SAS_TASK_UNDELIVERED;
2863*4882a593Smuzhiyun status = SAM_STAT_TASK_ABORTED;
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2866*4882a593Smuzhiyun break;
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun case SCI_FAILURE_RETRY_REQUIRED:
2869*4882a593Smuzhiyun
2870*4882a593Smuzhiyun /* Fail the I/O so it can be retried. */
2871*4882a593Smuzhiyun response = SAS_TASK_UNDELIVERED;
2872*4882a593Smuzhiyun if (!idev)
2873*4882a593Smuzhiyun status = SAS_DEVICE_UNKNOWN;
2874*4882a593Smuzhiyun else
2875*4882a593Smuzhiyun status = SAS_ABORTED_TASK;
2876*4882a593Smuzhiyun
2877*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2878*4882a593Smuzhiyun break;
2879*4882a593Smuzhiyun
2880*4882a593Smuzhiyun
2881*4882a593Smuzhiyun default:
2882*4882a593Smuzhiyun /* Catch any otherwise unhandled error codes here. */
2883*4882a593Smuzhiyun dev_dbg(&ihost->pdev->dev,
2884*4882a593Smuzhiyun "%s: invalid completion code: 0x%x - "
2885*4882a593Smuzhiyun "isci_request = %p\n",
2886*4882a593Smuzhiyun __func__, completion_status, request);
2887*4882a593Smuzhiyun
2888*4882a593Smuzhiyun response = SAS_TASK_UNDELIVERED;
2889*4882a593Smuzhiyun
2890*4882a593Smuzhiyun /* See if the device has been/is being stopped. Note
2891*4882a593Smuzhiyun * that we ignore the quiesce state, since we are
2892*4882a593Smuzhiyun * concerned about the actual device state.
2893*4882a593Smuzhiyun */
2894*4882a593Smuzhiyun if (!idev)
2895*4882a593Smuzhiyun status = SAS_DEVICE_UNKNOWN;
2896*4882a593Smuzhiyun else
2897*4882a593Smuzhiyun status = SAS_ABORTED_TASK;
2898*4882a593Smuzhiyun
2899*4882a593Smuzhiyun if (SAS_PROTOCOL_SMP == task->task_proto)
2900*4882a593Smuzhiyun set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2901*4882a593Smuzhiyun else
2902*4882a593Smuzhiyun clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2903*4882a593Smuzhiyun break;
2904*4882a593Smuzhiyun }
2905*4882a593Smuzhiyun
2906*4882a593Smuzhiyun switch (task->task_proto) {
2907*4882a593Smuzhiyun case SAS_PROTOCOL_SSP:
2908*4882a593Smuzhiyun if (task->data_dir == DMA_NONE)
2909*4882a593Smuzhiyun break;
2910*4882a593Smuzhiyun if (task->num_scatter == 0)
2911*4882a593Smuzhiyun /* 0 indicates a single dma address */
2912*4882a593Smuzhiyun dma_unmap_single(&ihost->pdev->dev,
2913*4882a593Smuzhiyun request->zero_scatter_daddr,
2914*4882a593Smuzhiyun task->total_xfer_len, task->data_dir);
2915*4882a593Smuzhiyun else /* unmap the sgl dma addresses */
2916*4882a593Smuzhiyun dma_unmap_sg(&ihost->pdev->dev, task->scatter,
2917*4882a593Smuzhiyun request->num_sg_entries, task->data_dir);
2918*4882a593Smuzhiyun break;
2919*4882a593Smuzhiyun case SAS_PROTOCOL_SMP: {
2920*4882a593Smuzhiyun struct scatterlist *sg = &task->smp_task.smp_req;
2921*4882a593Smuzhiyun struct smp_req *smp_req;
2922*4882a593Smuzhiyun void *kaddr;
2923*4882a593Smuzhiyun
2924*4882a593Smuzhiyun dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
2925*4882a593Smuzhiyun
2926*4882a593Smuzhiyun /* need to swab it back in case the command buffer is re-used */
2927*4882a593Smuzhiyun kaddr = kmap_atomic(sg_page(sg));
2928*4882a593Smuzhiyun smp_req = kaddr + sg->offset;
2929*4882a593Smuzhiyun sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2930*4882a593Smuzhiyun kunmap_atomic(kaddr);
2931*4882a593Smuzhiyun break;
2932*4882a593Smuzhiyun }
2933*4882a593Smuzhiyun default:
2934*4882a593Smuzhiyun break;
2935*4882a593Smuzhiyun }
2936*4882a593Smuzhiyun
2937*4882a593Smuzhiyun spin_lock_irqsave(&task->task_state_lock, task_flags);
2938*4882a593Smuzhiyun
2939*4882a593Smuzhiyun task->task_status.resp = response;
2940*4882a593Smuzhiyun task->task_status.stat = status;
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
2943*4882a593Smuzhiyun /* Normal notification (task_done) */
2944*4882a593Smuzhiyun task->task_state_flags |= SAS_TASK_STATE_DONE;
2945*4882a593Smuzhiyun task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
2946*4882a593Smuzhiyun SAS_TASK_STATE_PENDING);
2947*4882a593Smuzhiyun }
2948*4882a593Smuzhiyun spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2949*4882a593Smuzhiyun
2950*4882a593Smuzhiyun /* complete the io request to the core. */
2951*4882a593Smuzhiyun sci_controller_complete_io(ihost, request->target_device, request);
2952*4882a593Smuzhiyun
2953*4882a593Smuzhiyun /* set the terminated flag so the request cannot be completed or
2954*4882a593Smuzhiyun * terminated again, and so that any calls into abort
2955*4882a593Smuzhiyun * task recognize the already-completed case.
2956*4882a593Smuzhiyun */
2957*4882a593Smuzhiyun set_bit(IREQ_TERMINATED, &request->flags);
2958*4882a593Smuzhiyun
2959*4882a593Smuzhiyun ireq_done(ihost, request, task);
2960*4882a593Smuzhiyun }
2961*4882a593Smuzhiyun
2962*4882a593Smuzhiyun static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2963*4882a593Smuzhiyun {
2964*4882a593Smuzhiyun struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2965*4882a593Smuzhiyun struct domain_device *dev = ireq->target_device->domain_dev;
2966*4882a593Smuzhiyun enum sci_base_request_states state;
2967*4882a593Smuzhiyun struct sas_task *task;
2968*4882a593Smuzhiyun
2969*4882a593Smuzhiyun /* XXX as hch said always creating an internal sas_task for tmf
2970*4882a593Smuzhiyun * requests would simplify the driver
2971*4882a593Smuzhiyun */
2972*4882a593Smuzhiyun task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
2973*4882a593Smuzhiyun
2974*4882a593Smuzhiyun /* all unaccelerated request types (non-SSP or non-NCQ) are
2975*4882a593Smuzhiyun * handled with substates
2976*4882a593Smuzhiyun */
2977*4882a593Smuzhiyun if (!task && dev->dev_type == SAS_END_DEVICE) {
2978*4882a593Smuzhiyun state = SCI_REQ_TASK_WAIT_TC_COMP;
2979*4882a593Smuzhiyun } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2980*4882a593Smuzhiyun state = SCI_REQ_SMP_WAIT_RESP;
2981*4882a593Smuzhiyun } else if (task && sas_protocol_ata(task->task_proto) &&
2982*4882a593Smuzhiyun !task->ata_task.use_ncq) {
2983*4882a593Smuzhiyun if (dev->sata_dev.class == ATA_DEV_ATAPI &&
2984*4882a593Smuzhiyun task->ata_task.fis.command == ATA_CMD_PACKET) {
2985*4882a593Smuzhiyun state = SCI_REQ_ATAPI_WAIT_H2D;
2986*4882a593Smuzhiyun } else if (task->data_dir == DMA_NONE) {
2987*4882a593Smuzhiyun state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2988*4882a593Smuzhiyun } else if (task->ata_task.dma_xfer) {
2989*4882a593Smuzhiyun state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2990*4882a593Smuzhiyun } else /* PIO */ {
2991*4882a593Smuzhiyun state = SCI_REQ_STP_PIO_WAIT_H2D;
2992*4882a593Smuzhiyun }
2993*4882a593Smuzhiyun } else {
2994*4882a593Smuzhiyun /* SSP or NCQ are fully accelerated, no substates */
2995*4882a593Smuzhiyun return;
2996*4882a593Smuzhiyun }
2997*4882a593Smuzhiyun sci_change_state(sm, state);
2998*4882a593Smuzhiyun }
2999*4882a593Smuzhiyun
3000*4882a593Smuzhiyun static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
3001*4882a593Smuzhiyun {
3002*4882a593Smuzhiyun struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3003*4882a593Smuzhiyun struct isci_host *ihost = ireq->owning_controller;
3004*4882a593Smuzhiyun
3005*4882a593Smuzhiyun /* Tell the SCI_USER that the IO request is complete */
3006*4882a593Smuzhiyun if (!test_bit(IREQ_TMF, &ireq->flags))
3007*4882a593Smuzhiyun isci_request_io_request_complete(ihost, ireq,
3008*4882a593Smuzhiyun ireq->sci_status);
3009*4882a593Smuzhiyun else
3010*4882a593Smuzhiyun isci_task_request_complete(ihost, ireq, ireq->sci_status);
3011*4882a593Smuzhiyun }
3012*4882a593Smuzhiyun
3013*4882a593Smuzhiyun static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3014*4882a593Smuzhiyun {
3015*4882a593Smuzhiyun struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3016*4882a593Smuzhiyun
3017*4882a593Smuzhiyun /* Setting the abort bit in the Task Context is required by the silicon. */
3018*4882a593Smuzhiyun ireq->tc->abort = 1;
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun
3021*4882a593Smuzhiyun static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3022*4882a593Smuzhiyun {
3023*4882a593Smuzhiyun struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3024*4882a593Smuzhiyun
3025*4882a593Smuzhiyun ireq->target_device->working_request = ireq;
3026*4882a593Smuzhiyun }
3027*4882a593Smuzhiyun
3028*4882a593Smuzhiyun static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3029*4882a593Smuzhiyun {
3030*4882a593Smuzhiyun struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3031*4882a593Smuzhiyun
3032*4882a593Smuzhiyun ireq->target_device->working_request = ireq;
3033*4882a593Smuzhiyun }
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun static const struct sci_base_state sci_request_state_table[] = {
3036*4882a593Smuzhiyun [SCI_REQ_INIT] = { },
3037*4882a593Smuzhiyun [SCI_REQ_CONSTRUCTED] = { },
3038*4882a593Smuzhiyun [SCI_REQ_STARTED] = {
3039*4882a593Smuzhiyun .enter_state = sci_request_started_state_enter,
3040*4882a593Smuzhiyun },
3041*4882a593Smuzhiyun [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3042*4882a593Smuzhiyun .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
3043*4882a593Smuzhiyun },
3044*4882a593Smuzhiyun [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3045*4882a593Smuzhiyun [SCI_REQ_STP_PIO_WAIT_H2D] = {
3046*4882a593Smuzhiyun .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
3047*4882a593Smuzhiyun },
3048*4882a593Smuzhiyun [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3049*4882a593Smuzhiyun [SCI_REQ_STP_PIO_DATA_IN] = { },
3050*4882a593Smuzhiyun [SCI_REQ_STP_PIO_DATA_OUT] = { },
3051*4882a593Smuzhiyun [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3052*4882a593Smuzhiyun [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3053*4882a593Smuzhiyun [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3054*4882a593Smuzhiyun [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3055*4882a593Smuzhiyun [SCI_REQ_SMP_WAIT_RESP] = { },
3056*4882a593Smuzhiyun [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3057*4882a593Smuzhiyun [SCI_REQ_ATAPI_WAIT_H2D] = { },
3058*4882a593Smuzhiyun [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3059*4882a593Smuzhiyun [SCI_REQ_ATAPI_WAIT_D2H] = { },
3060*4882a593Smuzhiyun [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
3061*4882a593Smuzhiyun [SCI_REQ_COMPLETED] = {
3062*4882a593Smuzhiyun .enter_state = sci_request_completed_state_enter,
3063*4882a593Smuzhiyun },
3064*4882a593Smuzhiyun [SCI_REQ_ABORTING] = {
3065*4882a593Smuzhiyun .enter_state = sci_request_aborting_state_enter,
3066*4882a593Smuzhiyun },
3067*4882a593Smuzhiyun [SCI_REQ_FINAL] = { },
3068*4882a593Smuzhiyun };
3069*4882a593Smuzhiyun
3070*4882a593Smuzhiyun static void
3071*4882a593Smuzhiyun sci_general_request_construct(struct isci_host *ihost,
3072*4882a593Smuzhiyun struct isci_remote_device *idev,
3073*4882a593Smuzhiyun struct isci_request *ireq)
3074*4882a593Smuzhiyun {
3075*4882a593Smuzhiyun sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3076*4882a593Smuzhiyun
3077*4882a593Smuzhiyun ireq->target_device = idev;
3078*4882a593Smuzhiyun ireq->protocol = SAS_PROTOCOL_NONE;
3079*4882a593Smuzhiyun ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3080*4882a593Smuzhiyun
3081*4882a593Smuzhiyun ireq->sci_status = SCI_SUCCESS;
3082*4882a593Smuzhiyun ireq->scu_status = 0;
3083*4882a593Smuzhiyun ireq->post_context = 0xFFFFFFFF;
3084*4882a593Smuzhiyun }
3085*4882a593Smuzhiyun
3086*4882a593Smuzhiyun static enum sci_status
3087*4882a593Smuzhiyun sci_io_request_construct(struct isci_host *ihost,
3088*4882a593Smuzhiyun struct isci_remote_device *idev,
3089*4882a593Smuzhiyun struct isci_request *ireq)
3090*4882a593Smuzhiyun {
3091*4882a593Smuzhiyun struct domain_device *dev = idev->domain_dev;
3092*4882a593Smuzhiyun enum sci_status status = SCI_SUCCESS;
3093*4882a593Smuzhiyun
3094*4882a593Smuzhiyun /* Build the common part of the request */
3095*4882a593Smuzhiyun sci_general_request_construct(ihost, idev, ireq);
3096*4882a593Smuzhiyun
3097*4882a593Smuzhiyun if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3098*4882a593Smuzhiyun return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3099*4882a593Smuzhiyun
3100*4882a593Smuzhiyun if (dev->dev_type == SAS_END_DEVICE)
3101*4882a593Smuzhiyun /* pass */;
3102*4882a593Smuzhiyun else if (dev_is_sata(dev))
3103*4882a593Smuzhiyun memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3104*4882a593Smuzhiyun else if (dev_is_expander(dev->dev_type))
3105*4882a593Smuzhiyun /* pass */;
3106*4882a593Smuzhiyun else
3107*4882a593Smuzhiyun return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3108*4882a593Smuzhiyun
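	/* Zero only the header portion of the task context; the
	 * trailing SGL pair area (sgl_pair_ab onward) is excluded here
	 * and is presumably initialized when the request's SGL is built.
	 */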
3109*4882a593Smuzhiyun memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3110*4882a593Smuzhiyun
3111*4882a593Smuzhiyun return status;
3112*4882a593Smuzhiyun }
3113*4882a593Smuzhiyun
3114*4882a593Smuzhiyun enum sci_status sci_task_request_construct(struct isci_host *ihost,
3115*4882a593Smuzhiyun struct isci_remote_device *idev,
3116*4882a593Smuzhiyun u16 io_tag, struct isci_request *ireq)
3117*4882a593Smuzhiyun {
3118*4882a593Smuzhiyun struct domain_device *dev = idev->domain_dev;
3119*4882a593Smuzhiyun enum sci_status status = SCI_SUCCESS;
3120*4882a593Smuzhiyun
3121*4882a593Smuzhiyun /* Build the common part of the request */
3122*4882a593Smuzhiyun sci_general_request_construct(ihost, idev, ireq);
3123*4882a593Smuzhiyun
3124*4882a593Smuzhiyun if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
3125*4882a593Smuzhiyun set_bit(IREQ_TMF, &ireq->flags);
3126*4882a593Smuzhiyun memset(ireq->tc, 0, sizeof(struct scu_task_context));
3127*4882a593Smuzhiyun
3128*4882a593Smuzhiyun /* Set the protocol indicator. */
3129*4882a593Smuzhiyun if (dev_is_sata(dev))
3130*4882a593Smuzhiyun ireq->protocol = SAS_PROTOCOL_STP;
3131*4882a593Smuzhiyun else
3132*4882a593Smuzhiyun ireq->protocol = SAS_PROTOCOL_SSP;
3133*4882a593Smuzhiyun } else
3134*4882a593Smuzhiyun status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3135*4882a593Smuzhiyun
3136*4882a593Smuzhiyun return status;
3137*4882a593Smuzhiyun }
3138*4882a593Smuzhiyun
3139*4882a593Smuzhiyun static enum sci_status isci_request_ssp_request_construct(
3140*4882a593Smuzhiyun struct isci_request *request)
3141*4882a593Smuzhiyun {
3142*4882a593Smuzhiyun enum sci_status status;
3143*4882a593Smuzhiyun
3144*4882a593Smuzhiyun dev_dbg(&request->isci_host->pdev->dev,
3145*4882a593Smuzhiyun "%s: request = %p\n",
3146*4882a593Smuzhiyun __func__,
3147*4882a593Smuzhiyun request);
3148*4882a593Smuzhiyun status = sci_io_request_construct_basic_ssp(request);
3149*4882a593Smuzhiyun return status;
3150*4882a593Smuzhiyun }
3151*4882a593Smuzhiyun
3152*4882a593Smuzhiyun static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3153*4882a593Smuzhiyun {
3154*4882a593Smuzhiyun struct sas_task *task = isci_request_access_task(ireq);
3155*4882a593Smuzhiyun struct host_to_dev_fis *fis = &ireq->stp.cmd;
3156*4882a593Smuzhiyun struct ata_queued_cmd *qc = task->uldd_task;
3157*4882a593Smuzhiyun enum sci_status status;
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun dev_dbg(&ireq->isci_host->pdev->dev,
3160*4882a593Smuzhiyun "%s: ireq = %p\n",
3161*4882a593Smuzhiyun __func__,
3162*4882a593Smuzhiyun ireq);
3163*4882a593Smuzhiyun
3164*4882a593Smuzhiyun memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
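	/* Per the SATA H2D FIS layout, bit 7 of the flags byte is the C
	 * (command register update) bit; the low nibble carries the
	 * port multiplier port and is cleared by the mask below.
	 */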
3165*4882a593Smuzhiyun if (!task->ata_task.device_control_reg_update)
3166*4882a593Smuzhiyun fis->flags |= 0x80;
3167*4882a593Smuzhiyun fis->flags &= 0xF0;
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun status = sci_io_request_construct_basic_sata(ireq);
3170*4882a593Smuzhiyun
3171*4882a593Smuzhiyun if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3172*4882a593Smuzhiyun qc->tf.command == ATA_CMD_FPDMA_READ ||
3173*4882a593Smuzhiyun qc->tf.command == ATA_CMD_FPDMA_RECV ||
3174*4882a593Smuzhiyun qc->tf.command == ATA_CMD_FPDMA_SEND ||
3175*4882a593Smuzhiyun qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
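		/* For first-party DMA (NCQ) commands the tag occupies
		 * bits 7:3 of the FIS sector count field, hence the
		 * shift by 3 below.
		 */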
3176*4882a593Smuzhiyun fis->sector_count = qc->tag << 3;
3177*4882a593Smuzhiyun ireq->tc->type.stp.ncq_tag = qc->tag;
3178*4882a593Smuzhiyun }
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun return status;
3181*4882a593Smuzhiyun }
3182*4882a593Smuzhiyun
3183*4882a593Smuzhiyun static enum sci_status
3184*4882a593Smuzhiyun sci_io_request_construct_smp(struct device *dev,
3185*4882a593Smuzhiyun struct isci_request *ireq,
3186*4882a593Smuzhiyun struct sas_task *task)
3187*4882a593Smuzhiyun {
3188*4882a593Smuzhiyun struct scatterlist *sg = &task->smp_task.smp_req;
3189*4882a593Smuzhiyun struct isci_remote_device *idev;
3190*4882a593Smuzhiyun struct scu_task_context *task_context;
3191*4882a593Smuzhiyun struct isci_port *iport;
3192*4882a593Smuzhiyun struct smp_req *smp_req;
3193*4882a593Smuzhiyun void *kaddr;
3194*4882a593Smuzhiyun u8 req_len;
3195*4882a593Smuzhiyun u32 cmd;
3196*4882a593Smuzhiyun
3197*4882a593Smuzhiyun kaddr = kmap_atomic(sg_page(sg));
3198*4882a593Smuzhiyun smp_req = kaddr + sg->offset;
3199*4882a593Smuzhiyun /*
3200*4882a593Smuzhiyun * Look at the SMP request's header fields; for certain SAS 1.x SMP
3201*4882a593Smuzhiyun * functions under SAS 2.0, a zero request length really indicates
3202*4882a593Smuzhiyun * a non-zero default length.
3203*4882a593Smuzhiyun */
3204*4882a593Smuzhiyun if (smp_req->req_len == 0) {
3205*4882a593Smuzhiyun switch (smp_req->func) {
3206*4882a593Smuzhiyun case SMP_DISCOVER:
3207*4882a593Smuzhiyun case SMP_REPORT_PHY_ERR_LOG:
3208*4882a593Smuzhiyun case SMP_REPORT_PHY_SATA:
3209*4882a593Smuzhiyun case SMP_REPORT_ROUTE_INFO:
3210*4882a593Smuzhiyun smp_req->req_len = 2;
3211*4882a593Smuzhiyun break;
3212*4882a593Smuzhiyun case SMP_CONF_ROUTE_INFO:
3213*4882a593Smuzhiyun case SMP_PHY_CONTROL:
3214*4882a593Smuzhiyun case SMP_PHY_TEST_FUNCTION:
3215*4882a593Smuzhiyun smp_req->req_len = 9;
3216*4882a593Smuzhiyun break;
3217*4882a593Smuzhiyun /* Default - zero is a valid default for 2.0. */
3218*4882a593Smuzhiyun }
3219*4882a593Smuzhiyun }
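	/* req_len counts dwords (per the SMP frame format) and is later
	 * programmed into the task context's ssp_command_iu_length
	 * field below.
	 */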
3220*4882a593Smuzhiyun req_len = smp_req->req_len;
3221*4882a593Smuzhiyun sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3222*4882a593Smuzhiyun cmd = *(u32 *) smp_req;
3223*4882a593Smuzhiyun kunmap_atomic(kaddr);
3224*4882a593Smuzhiyun
3225*4882a593Smuzhiyun if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3226*4882a593Smuzhiyun return SCI_FAILURE;
3227*4882a593Smuzhiyun
3228*4882a593Smuzhiyun ireq->protocol = SAS_PROTOCOL_SMP;
3229*4882a593Smuzhiyun
	task_context = ireq->tc;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;
	/*
	 * 18h ~ 30h, protocol specific
	 * Since the command IU has been built by the framework at this point,
	 * we just copy the first dword from the command IU to this location.
	 */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
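	/*
	 * Sketch of the post-context word composed above; the exact bit
	 * positions live in the SCU hardware headers and the shift names
	 * below are abbreviated:
	 *
	 *	[ request type | PEG | logical port | task context index ]
	 *
	 * e.g. for protocol engine group 0, physical port 1, and an I/O tag
	 * whose TCI is 5, the word would be roughly
	 * POST_TC | (0 << PEG_SHIFT) | (1 << LP_SHIFT) | 5.
	 */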
	/*
	 * Copy the physical address of the command buffer into the SCU task
	 * context; the buffer handed to the hardware must not contain the
	 * command header, so skip past the first dword.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
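	/*
	 * Worked example: assuming the mapped request starts at DMA address
	 * 0x7f001000, the SCU fetches the request body from 0x7f001004 while
	 * the header dword it needs was already copied into the task context
	 * above. Note the upper half is taken from the unadjusted base, which
	 * assumes the +4 never carries across the 32-bit boundary.
	 */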

	/*
	 * The SMP response arrives as an unsolicited frame (UF), so there is
	 * no need to set a response IU address.
	 */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the SMP request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * SCI_SUCCESS on successful completion, or a specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct device *dev = &ireq->isci_host->pdev->dev;
	enum sci_status status = SCI_FAILURE;

	status = sci_io_request_construct_smp(dev, ireq, task);
	if (status != SCI_SUCCESS)
		dev_dbg(dev,
			"%s: failed with status = %d\n",
			__func__,
			status);

	return status;
}

/**
 * isci_io_request_build() - This function builds the I/O request object.
 * @ihost: This parameter specifies the ISCI host object.
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @idev: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or a specific failure code.
 */
static enum sci_status isci_io_request_build(struct isci_host *ihost,
					     struct isci_request *request,
					     struct isci_remote_device *idev)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		idev,
		request,
		task->num_scatter);

	/* Map the sgl addresses, if present.
	 * libata does the mapping for SATA devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&ihost->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
		);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}
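	/* A matching unmap is assumed to happen in the completion path (not
	 * shown in this function), along the lines of:
	 *
	 *	dma_unmap_sg(&ihost->pdev->dev, task->scatter,
	 *		     task->num_scatter, task->data_dir);
	 *
	 * (per the DMA-API, unmap takes the nents originally passed to
	 * dma_map_sg(), not the count it returned)
	 */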

	status = sci_io_request_construct(ihost, idev, request);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request construct\n",
			__func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	/* Propagate the protocol-specific construct status rather than
	 * unconditionally reporting success.
	 */
	return status;
}

static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
{
	struct isci_request *ireq;

	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
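	/*
	 * The io_tag is assumed to encode a sequence number alongside the
	 * task context index (TCI); ISCI_TAG_TCI() above extracts just the
	 * TCI, which doubles as the index into the preallocated ihost->reqs[]
	 * array, e.g. something like:
	 *
	 *	tag = (seq << 12) | tci;	tci = tag & (max_requests - 1);
	 */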
	ireq->io_tag = tag;
	ireq->io_request_completion = NULL;
	ireq->flags = 0;
	ireq->num_sg_entries = 0;

	return ireq;
}

static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
						     struct sas_task *task,
						     u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.io_task_ptr = task;
	clear_bit(IREQ_TMF, &ireq->flags);
	task->lldd_task = ireq;

	return ireq;
}

struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
					       struct isci_tmf *isci_tmf,
					       u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
	set_bit(IREQ_TMF, &ireq->flags);

	return ireq;
}

int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* Do common allocation and init of the request object. */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: request_construct failed - status = 0x%x\n",
			__func__,
			status);
		return status;
	}

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state. Issue the
			 * request on the task side. Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (i.e. the IREQ_TMF flag
			 * is not set).
			 */
			status = sci_controller_start_task(ihost,
							   idev,
							   ireq);
		} else {
			status = SCI_FAILURE;
		}
	} else {
		/* Send the request; the I/O tag was assigned by the caller. */
		status = sci_controller_start_io(ihost, idev,
						 ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request start (0x%x)\n",
			__func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}
	/* Either the I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 */
	if (status != SCI_SUCCESS) {
		/* The request did not really start in the
		 * hardware, so mark it terminated here so
		 * that no terminations will be attempted.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		sas_task_abort(task);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

	return ret;
}
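
/*
 * A hedged sketch of how this entry point is typically reached: libsas hands
 * the LLDD a sas_task via its ->lldd_execute_task() hook, the task path
 * allocates a tag, and then calls in here. Illustrative only; the real caller
 * lives in this driver's task-management code, and
 * 'alloc_tag_and_lookup_device' below is a made-up placeholder for the tag
 * allocation and device lookup the caller performs under the host lock:
 *
 *	u16 tag;
 *
 *	if (alloc_tag_and_lookup_device(ihost, task, &tag, &idev) == 0)
 *		status = isci_request_execute(ihost, idev, task, tag);
 */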