xref: /OK3568_Linux_fs/kernel/drivers/scsi/pm8001/pm8001_sas.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (c) 2008-2009 USI Co., Ltd.
5*4882a593Smuzhiyun  * All rights reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Redistribution and use in source and binary forms, with or without
8*4882a593Smuzhiyun  * modification, are permitted provided that the following conditions
9*4882a593Smuzhiyun  * are met:
10*4882a593Smuzhiyun  * 1. Redistributions of source code must retain the above copyright
11*4882a593Smuzhiyun  *    notice, this list of conditions, and the following disclaimer,
12*4882a593Smuzhiyun  *    without modification.
13*4882a593Smuzhiyun  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14*4882a593Smuzhiyun  *    substantially similar to the "NO WARRANTY" disclaimer below
15*4882a593Smuzhiyun  *    ("Disclaimer") and any redistribution must be conditioned upon
16*4882a593Smuzhiyun  *    including a substantially similar Disclaimer requirement for further
17*4882a593Smuzhiyun  *    binary redistribution.
18*4882a593Smuzhiyun  * 3. Neither the names of the above-listed copyright holders nor the names
19*4882a593Smuzhiyun  *    of any contributors may be used to endorse or promote products derived
20*4882a593Smuzhiyun  *    from this software without specific prior written permission.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * Alternatively, this software may be distributed under the terms of the
23*4882a593Smuzhiyun  * GNU General Public License ("GPL") version 2 as published by the Free
24*4882a593Smuzhiyun  * Software Foundation.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * NO WARRANTY
27*4882a593Smuzhiyun  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28*4882a593Smuzhiyun  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29*4882a593Smuzhiyun  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30*4882a593Smuzhiyun  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31*4882a593Smuzhiyun  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32*4882a593Smuzhiyun  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33*4882a593Smuzhiyun  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34*4882a593Smuzhiyun  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35*4882a593Smuzhiyun  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36*4882a593Smuzhiyun  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37*4882a593Smuzhiyun  * POSSIBILITY OF SUCH DAMAGES.
38*4882a593Smuzhiyun  *
39*4882a593Smuzhiyun  */
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun #include <linux/slab.h>
42*4882a593Smuzhiyun #include "pm8001_sas.h"
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun /**
45*4882a593Smuzhiyun  * pm8001_find_tag - from sas task to find out  tag that belongs to this task
46*4882a593Smuzhiyun  * @task: the task sent to the LLDD
47*4882a593Smuzhiyun  * @tag: the found tag associated with the task
48*4882a593Smuzhiyun  */
pm8001_find_tag(struct sas_task * task,u32 * tag)49*4882a593Smuzhiyun static int pm8001_find_tag(struct sas_task *task, u32 *tag)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	if (task->lldd_task) {
52*4882a593Smuzhiyun 		struct pm8001_ccb_info *ccb;
53*4882a593Smuzhiyun 		ccb = task->lldd_task;
54*4882a593Smuzhiyun 		*tag = ccb->ccb_tag;
55*4882a593Smuzhiyun 		return 1;
56*4882a593Smuzhiyun 	}
57*4882a593Smuzhiyun 	return 0;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /**
61*4882a593Smuzhiyun   * pm8001_tag_free - free the no more needed tag
62*4882a593Smuzhiyun   * @pm8001_ha: our hba struct
63*4882a593Smuzhiyun   * @tag: the found tag associated with the task
64*4882a593Smuzhiyun   */
pm8001_tag_free(struct pm8001_hba_info * pm8001_ha,u32 tag)65*4882a593Smuzhiyun void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun 	void *bitmap = pm8001_ha->tags;
68*4882a593Smuzhiyun 	clear_bit(tag, bitmap);
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /**
72*4882a593Smuzhiyun   * pm8001_tag_alloc - allocate a empty tag for task used.
73*4882a593Smuzhiyun   * @pm8001_ha: our hba struct
74*4882a593Smuzhiyun   * @tag_out: the found empty tag .
75*4882a593Smuzhiyun   */
pm8001_tag_alloc(struct pm8001_hba_info * pm8001_ha,u32 * tag_out)76*4882a593Smuzhiyun inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	unsigned int tag;
79*4882a593Smuzhiyun 	void *bitmap = pm8001_ha->tags;
80*4882a593Smuzhiyun 	unsigned long flags;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
83*4882a593Smuzhiyun 	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
84*4882a593Smuzhiyun 	if (tag >= pm8001_ha->tags_num) {
85*4882a593Smuzhiyun 		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
86*4882a593Smuzhiyun 		return -SAS_QUEUE_FULL;
87*4882a593Smuzhiyun 	}
88*4882a593Smuzhiyun 	set_bit(tag, bitmap);
89*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
90*4882a593Smuzhiyun 	*tag_out = tag;
91*4882a593Smuzhiyun 	return 0;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun 
pm8001_tag_init(struct pm8001_hba_info * pm8001_ha)94*4882a593Smuzhiyun void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun 	int i;
97*4882a593Smuzhiyun 	for (i = 0; i < pm8001_ha->tags_num; ++i)
98*4882a593Smuzhiyun 		pm8001_tag_free(pm8001_ha, i);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun  /**
102*4882a593Smuzhiyun   * pm8001_mem_alloc - allocate memory for pm8001.
103*4882a593Smuzhiyun   * @pdev: pci device.
104*4882a593Smuzhiyun   * @virt_addr: the allocated virtual address
105*4882a593Smuzhiyun   * @pphys_addr_hi: the physical address high byte address.
106*4882a593Smuzhiyun   * @pphys_addr_lo: the physical address low byte address.
107*4882a593Smuzhiyun   * @mem_size: memory size.
108*4882a593Smuzhiyun   */
pm8001_mem_alloc(struct pci_dev * pdev,void ** virt_addr,dma_addr_t * pphys_addr,u32 * pphys_addr_hi,u32 * pphys_addr_lo,u32 mem_size,u32 align)109*4882a593Smuzhiyun int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
110*4882a593Smuzhiyun 	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
111*4882a593Smuzhiyun 	u32 *pphys_addr_lo, u32 mem_size, u32 align)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun 	caddr_t mem_virt_alloc;
114*4882a593Smuzhiyun 	dma_addr_t mem_dma_handle;
115*4882a593Smuzhiyun 	u64 phys_align;
116*4882a593Smuzhiyun 	u64 align_offset = 0;
117*4882a593Smuzhiyun 	if (align)
118*4882a593Smuzhiyun 		align_offset = (dma_addr_t)align - 1;
119*4882a593Smuzhiyun 	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
120*4882a593Smuzhiyun 					    &mem_dma_handle, GFP_KERNEL);
121*4882a593Smuzhiyun 	if (!mem_virt_alloc) {
122*4882a593Smuzhiyun 		pr_err("pm80xx: memory allocation error\n");
123*4882a593Smuzhiyun 		return -1;
124*4882a593Smuzhiyun 	}
125*4882a593Smuzhiyun 	*pphys_addr = mem_dma_handle;
126*4882a593Smuzhiyun 	phys_align = (*pphys_addr + align_offset) & ~align_offset;
127*4882a593Smuzhiyun 	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
128*4882a593Smuzhiyun 	*pphys_addr_hi = upper_32_bits(phys_align);
129*4882a593Smuzhiyun 	*pphys_addr_lo = lower_32_bits(phys_align);
130*4882a593Smuzhiyun 	return 0;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun /**
134*4882a593Smuzhiyun   * pm8001_find_ha_by_dev - from domain device which come from sas layer to
135*4882a593Smuzhiyun   * find out our hba struct.
136*4882a593Smuzhiyun   * @dev: the domain device which from sas layer.
137*4882a593Smuzhiyun   */
138*4882a593Smuzhiyun static
pm8001_find_ha_by_dev(struct domain_device * dev)139*4882a593Smuzhiyun struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	struct sas_ha_struct *sha = dev->port->ha;
142*4882a593Smuzhiyun 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
143*4882a593Smuzhiyun 	return pm8001_ha;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun 
/**
 * pm8001_phy_control - phy control callback registered in the
 * sas_domain_function_template for libsas to use.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: operation argument (a struct sas_phy_linkrates * for
 * PHY_FUNC_SET_LINK_RATE, otherwise NULL).
 *
 * Note: this only controls HBA phys; to control an expander phy an SMP
 * command must be used instead.
 *
 * Returns 0 on success, -EINVAL on a failed bar4 shift, -EOPNOTSUPP for
 * an unsupported @func.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	/* phy_start_req completion fires through enable_completion below. */
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		/* 0 means "leave the current limit unchanged". */
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		/* A disabled phy must be started before it can be reset. */
		if (pm8001_ha->phy[phy_id].phy_state ==  PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		/* Link-up state encoding differs between SPC and SPCv. */
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				/* Tell libsas the link is gone before
				 * stopping the phy. */
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		/* On the 8001 chip the event counters live behind a
		 * shiftable BAR4 window; select it first. */
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			/* Per-phy counter block: 0x4000 stride within the
			 * 4-phy group selected above. */
			uint32_t *qp = (uint32_t *)(((char *)
				pm8001_ha->io_mem[2].memvirtaddr)
				+ 0x1034 + (0x4000 * (phy_id & 3)));

			phy->invalid_dword_count = qp[0];
			phy->running_disparity_error_count = qp[1];
			/* qp[2] is intentionally skipped here. */
			phy->loss_of_dword_sync_count = qp[3];
			phy->phy_reset_problem_count = qp[4];
		}
		/* Restore the default BAR4 window before unlocking. */
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		/* No settle delay needed for a pure read. */
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	/* Give the hardware time to act on the request. */
	msleep(300);
	return rc;
}
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun /**
258*4882a593Smuzhiyun   * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
259*4882a593Smuzhiyun   * command to HBA.
260*4882a593Smuzhiyun   * @shost: the scsi host data.
261*4882a593Smuzhiyun   */
pm8001_scan_start(struct Scsi_Host * shost)262*4882a593Smuzhiyun void pm8001_scan_start(struct Scsi_Host *shost)
263*4882a593Smuzhiyun {
264*4882a593Smuzhiyun 	int i;
265*4882a593Smuzhiyun 	struct pm8001_hba_info *pm8001_ha;
266*4882a593Smuzhiyun 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
267*4882a593Smuzhiyun 	DECLARE_COMPLETION_ONSTACK(completion);
268*4882a593Smuzhiyun 	pm8001_ha = sha->lldd_ha;
269*4882a593Smuzhiyun 	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
270*4882a593Smuzhiyun 	if (pm8001_ha->chip_id == chip_8001)
271*4882a593Smuzhiyun 		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
272*4882a593Smuzhiyun 	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
273*4882a593Smuzhiyun 		pm8001_ha->phy[i].enable_completion = &completion;
274*4882a593Smuzhiyun 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
275*4882a593Smuzhiyun 		wait_for_completion(&completion);
276*4882a593Smuzhiyun 		msleep(300);
277*4882a593Smuzhiyun 	}
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
pm8001_scan_finished(struct Scsi_Host * shost,unsigned long time)280*4882a593Smuzhiyun int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	/* give the phy enabling interrupt event time to come in (1s
285*4882a593Smuzhiyun 	* is empirically about all it takes) */
286*4882a593Smuzhiyun 	if (time < HZ)
287*4882a593Smuzhiyun 		return 0;
288*4882a593Smuzhiyun 	/* Wait for discovery to finish */
289*4882a593Smuzhiyun 	sas_drain_work(ha);
290*4882a593Smuzhiyun 	return 1;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun /**
294*4882a593Smuzhiyun   * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
295*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
296*4882a593Smuzhiyun   * @ccb: the ccb which attached to smp task
297*4882a593Smuzhiyun   */
pm8001_task_prep_smp(struct pm8001_hba_info * pm8001_ha,struct pm8001_ccb_info * ccb)298*4882a593Smuzhiyun static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
299*4882a593Smuzhiyun 	struct pm8001_ccb_info *ccb)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun 	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun 
/* Extract the NCQ tag from an ATA queued command, if this is an
 * FPDMA/NCQ opcode. Returns 1 and fills @tag on success, else 0. */
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (!qc)
		return 0;

	switch (qc->tf.command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		*tag = qc->tag;
		return 1;
	default:
		return 0;
	}
}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun /**
321*4882a593Smuzhiyun   * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
322*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
323*4882a593Smuzhiyun   * @ccb: the ccb which attached to sata task
324*4882a593Smuzhiyun   */
pm8001_task_prep_ata(struct pm8001_hba_info * pm8001_ha,struct pm8001_ccb_info * ccb)325*4882a593Smuzhiyun static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
326*4882a593Smuzhiyun 	struct pm8001_ccb_info *ccb)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun 	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun /**
332*4882a593Smuzhiyun   * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
333*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
334*4882a593Smuzhiyun   * @ccb: the ccb which attached to TM
335*4882a593Smuzhiyun   * @tmf: the task management IU
336*4882a593Smuzhiyun   */
pm8001_task_prep_ssp_tm(struct pm8001_hba_info * pm8001_ha,struct pm8001_ccb_info * ccb,struct pm8001_tmf_task * tmf)337*4882a593Smuzhiyun static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
338*4882a593Smuzhiyun 	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
339*4882a593Smuzhiyun {
340*4882a593Smuzhiyun 	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun /**
344*4882a593Smuzhiyun   * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task
345*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
346*4882a593Smuzhiyun   * @ccb: the ccb which attached to ssp task
347*4882a593Smuzhiyun   */
pm8001_task_prep_ssp(struct pm8001_hba_info * pm8001_ha,struct pm8001_ccb_info * ccb)348*4882a593Smuzhiyun static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
349*4882a593Smuzhiyun 	struct pm8001_ccb_info *ccb)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun 	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun  /* Find the local port id that's attached to this device */
sas_find_local_port_id(struct domain_device * dev)355*4882a593Smuzhiyun static int sas_find_local_port_id(struct domain_device *dev)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun 	struct domain_device *pdev = dev->parent;
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	/* Directly attached device */
360*4882a593Smuzhiyun 	if (!pdev)
361*4882a593Smuzhiyun 		return dev->port->id;
362*4882a593Smuzhiyun 	while (pdev) {
363*4882a593Smuzhiyun 		struct domain_device *pdev_p = pdev->parent;
364*4882a593Smuzhiyun 		if (!pdev_p)
365*4882a593Smuzhiyun 			return pdev->port->id;
366*4882a593Smuzhiyun 		pdev = pdev->parent;
367*4882a593Smuzhiyun 	}
368*4882a593Smuzhiyun 	return 0;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun #define DEV_IS_GONE(pm8001_dev)	\
372*4882a593Smuzhiyun 	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
/**
 * pm8001_task_exec - queue a task (ssp, smp or ata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: non-zero if this is a task management request.
 * @tmf: the task management IU (only used when @is_tmf is set).
 *
 * Returns 0 on success (including the "device gone" paths, which are
 * completed via task_done), or a negative error / tag-allocation code
 * on failure.
 */
static int pm8001_task_exec(struct sas_task *task,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
	unsigned long flags = 0;
	enum sas_protocol task_proto = t->task_proto;

	/* No port: the phy went down. Complete the task as undelivered
	 * (SATA completion is handled elsewhere by libsas/libata). */
	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;
		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			t->task_done(t);
		return 0;
	}
	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	/* Controller is dead: fail the task back immediately. */
	if (pm8001_ha->controller_fatal_error) {
		struct task_status_struct *ts = &t->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		t->task_done(t);
		return 0;
	}
	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	/* do { } while (0): `continue` jumps to the (false) condition and
	 * exits the loop, acting as a structured "skip to done". */
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
			if (sas_protocol_ata(task_proto)) {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				/* task_done for ATA may re-enter the LLDD,
				 * so drop the HA lock around the call. */
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				t->task_done(t);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

		/* Non-ATA protocols map the scatterlist here; for ATA the
		 * mapping is done by libata, only the count is recorded. */
		if (!sas_protocol_ata(task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
					t->scatter,
					t->num_scatter,
					t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		ccb->device = pm8001_dev;
		switch (task_proto) {
		case SAS_PROTOCOL_SMP:
			atomic_inc(&pm8001_dev->running_req);
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			atomic_inc(&pm8001_dev->running_req);
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
			atomic_inc(&pm8001_dev->running_req);
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n", task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
			/* Undo the running_req increment from above. */
			atomic_dec(&pm8001_dev->running_req);
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
	} while (0);
	rc = 0;
	goto out_done;

err_out_tag:
	pm8001_tag_free(pm8001_ha, tag);
err_out:
	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
	/* Unmap only what this function mapped (non-ATA path above). */
	if (!sas_protocol_ata(task_proto))
		if (n_elem)
			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
				t->data_dir);
out_done:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return rc;
}
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun /**
510*4882a593Smuzhiyun   * pm8001_queue_command - register for upper layer used, all IO commands sent
511*4882a593Smuzhiyun   * to HBA are from this interface.
512*4882a593Smuzhiyun   * @task: the task to be execute.
513*4882a593Smuzhiyun   * @gfp_flags: gfp_flags
514*4882a593Smuzhiyun   */
pm8001_queue_command(struct sas_task * task,gfp_t gfp_flags)515*4882a593Smuzhiyun int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
516*4882a593Smuzhiyun {
517*4882a593Smuzhiyun 	return pm8001_task_exec(task, gfp_flags, 0, NULL);
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun /**
521*4882a593Smuzhiyun   * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
522*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
523*4882a593Smuzhiyun   * @ccb: the ccb which attached to ssp task
524*4882a593Smuzhiyun   * @task: the task to be free.
525*4882a593Smuzhiyun   * @ccb_idx: ccb index.
526*4882a593Smuzhiyun   */
pm8001_ccb_task_free(struct pm8001_hba_info * pm8001_ha,struct sas_task * task,struct pm8001_ccb_info * ccb,u32 ccb_idx)527*4882a593Smuzhiyun void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
528*4882a593Smuzhiyun 	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
529*4882a593Smuzhiyun {
530*4882a593Smuzhiyun 	if (!ccb->task)
531*4882a593Smuzhiyun 		return;
532*4882a593Smuzhiyun 	if (!sas_protocol_ata(task->task_proto))
533*4882a593Smuzhiyun 		if (ccb->n_elem)
534*4882a593Smuzhiyun 			dma_unmap_sg(pm8001_ha->dev, task->scatter,
535*4882a593Smuzhiyun 				task->num_scatter, task->data_dir);
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	switch (task->task_proto) {
538*4882a593Smuzhiyun 	case SAS_PROTOCOL_SMP:
539*4882a593Smuzhiyun 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
540*4882a593Smuzhiyun 			DMA_FROM_DEVICE);
541*4882a593Smuzhiyun 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
542*4882a593Smuzhiyun 			DMA_TO_DEVICE);
543*4882a593Smuzhiyun 		break;
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	case SAS_PROTOCOL_SATA:
546*4882a593Smuzhiyun 	case SAS_PROTOCOL_STP:
547*4882a593Smuzhiyun 	case SAS_PROTOCOL_SSP:
548*4882a593Smuzhiyun 	default:
549*4882a593Smuzhiyun 		/* do nothing */
550*4882a593Smuzhiyun 		break;
551*4882a593Smuzhiyun 	}
552*4882a593Smuzhiyun 	task->lldd_task = NULL;
553*4882a593Smuzhiyun 	ccb->task = NULL;
554*4882a593Smuzhiyun 	ccb->ccb_tag = 0xFFFFFFFF;
555*4882a593Smuzhiyun 	ccb->open_retry = 0;
556*4882a593Smuzhiyun 	pm8001_tag_free(pm8001_ha, ccb_idx);
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun  /**
560*4882a593Smuzhiyun   * pm8001_alloc_dev - find a empty pm8001_device
561*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
562*4882a593Smuzhiyun   */
pm8001_alloc_dev(struct pm8001_hba_info * pm8001_ha)563*4882a593Smuzhiyun static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
564*4882a593Smuzhiyun {
565*4882a593Smuzhiyun 	u32 dev;
566*4882a593Smuzhiyun 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
567*4882a593Smuzhiyun 		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
568*4882a593Smuzhiyun 			pm8001_ha->devices[dev].id = dev;
569*4882a593Smuzhiyun 			return &pm8001_ha->devices[dev];
570*4882a593Smuzhiyun 		}
571*4882a593Smuzhiyun 	}
572*4882a593Smuzhiyun 	if (dev == PM8001_MAX_DEVICES) {
573*4882a593Smuzhiyun 		pm8001_dbg(pm8001_ha, FAIL,
574*4882a593Smuzhiyun 			   "max support %d devices, ignore ..\n",
575*4882a593Smuzhiyun 			   PM8001_MAX_DEVICES);
576*4882a593Smuzhiyun 	}
577*4882a593Smuzhiyun 	return NULL;
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun /**
580*4882a593Smuzhiyun   * pm8001_find_dev - find a matching pm8001_device
581*4882a593Smuzhiyun   * @pm8001_ha: our hba card information
582*4882a593Smuzhiyun   * @device_id: device ID to match against
583*4882a593Smuzhiyun   */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
					u32 device_id)
{
	u32 dev;

	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	/*
	 * Falling out of the loop means no slot matched @device_id, so the
	 * former "if (dev == PM8001_MAX_DEVICES)" guard was always true
	 * here; log unconditionally.
	 */
	pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	return NULL;
}
597*4882a593Smuzhiyun 
pm8001_free_dev(struct pm8001_device * pm8001_dev)598*4882a593Smuzhiyun static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun 	u32 id = pm8001_dev->id;
601*4882a593Smuzhiyun 	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
602*4882a593Smuzhiyun 	pm8001_dev->id = id;
603*4882a593Smuzhiyun 	pm8001_dev->dev_type = SAS_PHY_UNUSED;
604*4882a593Smuzhiyun 	pm8001_dev->device_id = PM8001_MAX_DEVICES;
605*4882a593Smuzhiyun 	pm8001_dev->sas_device = NULL;
606*4882a593Smuzhiyun }
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun /**
609*4882a593Smuzhiyun   * pm8001_dev_found_notify - libsas notify a device is found.
610*4882a593Smuzhiyun   * @dev: the device structure which sas layer used.
611*4882a593Smuzhiyun   *
612*4882a593Smuzhiyun   * when libsas find a sas domain device, it should tell the LLDD that
613*4882a593Smuzhiyun   * device is found, and then LLDD register this device to HBA firmware
614*4882a593Smuzhiyun   * by the command "OPC_INB_REG_DEV", after that the HBA will assign a
615*4882a593Smuzhiyun   * device ID(according to device's sas address) and returned it to LLDD. From
616*4882a593Smuzhiyun   * now on, we communicate with HBA FW with the device ID which HBA assigned
617*4882a593Smuzhiyun   * rather than sas address. it is the necessary step for our HBA but it is
618*4882a593Smuzhiyun   * the optional for other HBA driver.
619*4882a593Smuzhiyun   */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	/* Grab a free slot in pm8001_ha->devices[] for this domain device. */
	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		/*
		 * Device sits behind an expander: find the expander phy whose
		 * attached SAS address matches this device's address.
		 */
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			/*
			 * NOTE(review): res is set to -1 but execution still
			 * falls through to reg_dev_req() below, and the
			 * success path returns 0 regardless — confirm this
			 * fall-through is intended.
			 */
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	/* Firmware assigns the device ID; dcompletion fires on its reply. */
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}
679*4882a593Smuzhiyun 
/* libsas ->lldd_dev_found hook: thin wrapper around the notify helper. */
int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}
684*4882a593Smuzhiyun 
/*
 * pm8001_task_done - completion callback for internally issued slow tasks:
 * stop the timeout timer and wake the waiter blocked on the completion.
 */
void pm8001_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
690*4882a593Smuzhiyun 
pm8001_tmf_timedout(struct timer_list * t)691*4882a593Smuzhiyun static void pm8001_tmf_timedout(struct timer_list *t)
692*4882a593Smuzhiyun {
693*4882a593Smuzhiyun 	struct sas_task_slow *slow = from_timer(slow, t, timer);
694*4882a593Smuzhiyun 	struct sas_task *task = slow->task;
695*4882a593Smuzhiyun 	unsigned long flags;
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	spin_lock_irqsave(&task->task_state_lock, flags);
698*4882a593Smuzhiyun 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
699*4882a593Smuzhiyun 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
700*4882a593Smuzhiyun 		complete(&task->slow_task->completion);
701*4882a593Smuzhiyun 	}
702*4882a593Smuzhiyun 	spin_unlock_irqrestore(&task->task_state_lock, flags);
703*4882a593Smuzhiyun }
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun #define PM8001_TASK_TIMEOUT 20
706*4882a593Smuzhiyun /**
707*4882a593Smuzhiyun   * pm8001_exec_internal_tmf_task - execute some task management commands.
708*4882a593Smuzhiyun   * @dev: the wanted device.
709*4882a593Smuzhiyun   * @tmf: which task management wanted to be take.
710*4882a593Smuzhiyun   * @para_len: para_len.
711*4882a593Smuzhiyun   * @parameter: ssp task parameter.
712*4882a593Smuzhiyun   *
713*4882a593Smuzhiyun   * when errors or exception happened, we may want to do something, for example
714*4882a593Smuzhiyun   * abort the issued task which result in this execption, it is done by calling
715*4882a593Smuzhiyun   * this function, note it is also with the task execute interface.
716*4882a593Smuzhiyun   */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	/* Retry the TMF up to three times on non-terminal failures. */
	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = pm8001_task_done;
		/* Arm the timeout; pm8001_tmf_timedout() completes the task. */
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		if (pm8001_ha->chip_id != chip_8001) {
			/* Non-8001 chips: move the device back to operational. */
			pm8001_dev->setds_completion = &completion_setstate;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion_setstate);
		}
		res = -TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			struct pm8001_ccb_info *ccb = task->lldd_task;

			pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
				   tmf->tmf);

			/* Detach the CCB so a late completion can't touch the task. */
			if (ccb)
				ccb->task = NULL;
			goto ex_err;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			* underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			/* Any other status: log, free the task, and retry. */
			pm8001_dbg(pm8001_ha, EH,
				   " Task to dev %016llx response:0x%x status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	/* A task surviving all three retries would indicate a logic error. */
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}
800*4882a593Smuzhiyun 
/*
 * pm8001_exec_internal_task_abort - build and send an internal firmware
 * abort request for @task_tag (or, per @flag, a wider scope) on @pm8001_dev,
 * waiting for its completion with a timeout. Retries up to three times.
 */
static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
	u32 task_tag)
{
	int res, retry;
	u32 ccb_tag;
	struct pm8001_ccb_info *ccb;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		task->task_done = pm8001_task_done;
		/* Arm the timeout; pm8001_tmf_timedout() completes the task. */
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		/* Reserve a CCB to carry the internal abort request. */
		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
		if (res)
			goto ex_err;
		ccb = &pm8001_ha->ccb_info[ccb_tag];
		ccb->device = pm8001_dev;
		ccb->ccb_tag = ccb_tag;
		ccb->task = task;
		ccb->n_elem = 0;

		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
			pm8001_dev, flag, task_tag, ccb_tag);
		if (res) {
			del_timer(&task->slow_task->timer);
			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
			pm8001_tag_free(pm8001_ha, ccb_tag);
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
			goto ex_err;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;

		} else {
			/* Any other status: log, free the task, and retry. */
			pm8001_dbg(pm8001_ha, EH,
				   " Task to dev %016llx response: 0x%x status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	/* A task surviving all three retries would indicate a logic error. */
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun /**
870*4882a593Smuzhiyun   * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
871*4882a593Smuzhiyun   * @dev: the device structure which sas layer used.
872*4882a593Smuzhiyun   */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			/*
			 * Drop the HBA lock while aborting and draining the
			 * outstanding requests (both can sleep), then
			 * re-acquire it before deregistering.
			 */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
				dev, 1, 0);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		/* Deregister from firmware and recycle the device slot. */
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
902*4882a593Smuzhiyun 
/* libsas ->lldd_dev_gone hook: thin wrapper around the notify helper. */
void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}
907*4882a593Smuzhiyun 
/*
 * pm8001_issue_ssp_tmf - issue an SSP task management function to @dev.
 * @dev: target device; must support the SSP protocol.
 * @lun: 8-byte binary SAM LUN (may legitimately contain 0x00 bytes).
 * @tmf: the task management request to execute.
 *
 * Returns TMF_RESP_FUNC_ESUPP for non-SSP devices, otherwise the result of
 * pm8001_exec_internal_tmf_task().
 */
static int pm8001_issue_ssp_tmf(struct domain_device *dev,
	u8 *lun, struct pm8001_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	/*
	 * Zero the whole struct: the TMF path memcpy()s sizeof(ssp_task)
	 * bytes of it, so no uninitialized stack data may leak through.
	 * Use memcpy() for the LUN — it is raw binary, and strncpy() would
	 * stop copying at the first 0x00 byte.
	 */
	memset(&ssp_task, 0, sizeof(ssp_task));
	memcpy(ssp_task.LUN, lun, 8);
	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
		tmf);
}
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun /* retry commands by ha, by task and/or by device */
/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	/* Walk every CCB, completing matching tasks with OPEN_REJECT/retry. */
	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		u32 tag;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			/*
			 * No specific device given: only accept pointers that
			 * land exactly on a valid devices[] array entry.
			 */
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		tag = ccb->ccb_tag;
		if (!tag || (tag == 0xFFFFFFFF))
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			/* Aborted elsewhere: free the CCB, skip the callback. */
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
			mb();/* in order to force CPU ordering */
			/*
			 * Drop the HBA lock around task_done(): the callback
			 * may re-enter the driver and take it again.
			 */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
990*4882a593Smuzhiyun 
/**
  * pm8001_I_T_nexus_reset - I_T nexus reset, used only for RECOVERY
  * @dev: the device structure for the device to reset.
  *
  * The standard mandates a link reset for ATA (type 0) and a hard reset
  * for SSP (type 1).
  */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		/* Direct-attached (local) SATA phys are not reset here. */
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* Link-reset the phy first, then abort all FW-queued I/O. */
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
			dev, 1, 0);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		/* SSP and everything else: hard reset the phy. */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
 out:
	sas_put_local_phy(phy);
	return rc;
}
1041*4882a593Smuzhiyun 
/*
* This function handles the IT_NEXUS_XXX event or completion
* status code for SSP/SATA/SMP I/O requests.
*/
pm8001_I_T_nexus_event_handler(struct domain_device * dev)1046*4882a593Smuzhiyun int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
1047*4882a593Smuzhiyun {
1048*4882a593Smuzhiyun 	int rc = TMF_RESP_FUNC_FAILED;
1049*4882a593Smuzhiyun 	struct pm8001_device *pm8001_dev;
1050*4882a593Smuzhiyun 	struct pm8001_hba_info *pm8001_ha;
1051*4882a593Smuzhiyun 	struct sas_phy *phy;
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	if (!dev || !dev->lldd_dev)
1054*4882a593Smuzhiyun 		return -1;
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	pm8001_dev = dev->lldd_dev;
1057*4882a593Smuzhiyun 	pm8001_ha = pm8001_find_ha_by_dev(dev);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	phy = sas_get_local_phy(dev);
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	if (dev_is_sata(dev)) {
1064*4882a593Smuzhiyun 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
1065*4882a593Smuzhiyun 		if (scsi_is_sas_phy_local(phy)) {
1066*4882a593Smuzhiyun 			rc = 0;
1067*4882a593Smuzhiyun 			goto out;
1068*4882a593Smuzhiyun 		}
1069*4882a593Smuzhiyun 		/* send internal ssp/sata/smp abort command to FW */
1070*4882a593Smuzhiyun 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1071*4882a593Smuzhiyun 							dev, 1, 0);
1072*4882a593Smuzhiyun 		msleep(100);
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 		/* deregister the target device */
1075*4882a593Smuzhiyun 		pm8001_dev_gone_notify(dev);
1076*4882a593Smuzhiyun 		msleep(200);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 		/*send phy reset to hard reset target */
1079*4882a593Smuzhiyun 		rc = sas_phy_reset(phy, 1);
1080*4882a593Smuzhiyun 		msleep(2000);
1081*4882a593Smuzhiyun 		pm8001_dev->setds_completion = &completion_setstate;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 		wait_for_completion(&completion_setstate);
1084*4882a593Smuzhiyun 	} else {
1085*4882a593Smuzhiyun 		/* send internal ssp/sata/smp abort command to FW */
1086*4882a593Smuzhiyun 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1087*4882a593Smuzhiyun 							dev, 1, 0);
1088*4882a593Smuzhiyun 		msleep(100);
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 		/* deregister the target device */
1091*4882a593Smuzhiyun 		pm8001_dev_gone_notify(dev);
1092*4882a593Smuzhiyun 		msleep(200);
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 		/*send phy reset to hard reset target */
1095*4882a593Smuzhiyun 		rc = sas_phy_reset(phy, 1);
1096*4882a593Smuzhiyun 		msleep(2000);
1097*4882a593Smuzhiyun 	}
1098*4882a593Smuzhiyun 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
1099*4882a593Smuzhiyun 		   pm8001_dev->device_id, rc);
1100*4882a593Smuzhiyun out:
1101*4882a593Smuzhiyun 	sas_put_local_phy(phy);
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	return rc;
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun /* mandatory SAM-3, the task reset the specified LUN*/
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		/*
		 * SATA has no LU_RESET TMF: abort outstanding I/O, reset the
		 * phy, then move the device back to the operational state.
		 * NOTE(review): rc is overwritten by each step, so only the
		 * set_dev_state_req() result is reported — confirm intended.
		 */
		struct sas_phy *phy = sas_get_local_phy(dev);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
			dev, 1, 0);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, 0x01);
		wait_for_completion(&completion_setstate);
	} else {
		/* SSP devices take a proper TMF_LU_RESET. */
		tmf_task.tmf = TMF_LU_RESET;
		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	}
	/* If failed, fall-through I_T_Nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun /* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	struct scsi_lun lun;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		int_to_scsilun(cmnd->device->lun, &lun);
		/* Recover the firmware tag of the task to be queried. */
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
		tmf_task.tmf = 	TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is still in Lun\n");
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	/* NOTE(review): logged at error level even on success — confirm. */
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun /*  mandatory SAM-3, still need free task/ccb info, abort the specified task */
/**
 * pm8001_abort_task() - mandatory SAM-3 task abort; also frees task/ccb info
 * @task: libsas task to abort (must have ->lldd_task and ->dev set)
 *
 * SSP: sends an ABORT TASK TMF to the target, then aborts the command in
 * the controller firmware as well.  SATA/STP on the 8006 chip: runs a
 * five-step recovery sequence (device to recovery state, phy hard reset,
 * wait for port reset, firmware abort-all, device back to operational);
 * other chips get only the firmware abort.  SMP: firmware abort only.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success, TMF_RESP_FUNC_FAILED
 * otherwise.
 */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev ;
	struct pm8001_hba_info *pm8001_ha;
	struct scsi_lun lun;
	struct pm8001_device *pm8001_dev;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id;
	struct sas_task_slow slow_task;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;
	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;
	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_printk("no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	/* Task already completed: nothing left to abort. */
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	/*
	 * If libsas did not attach a slow_task, provide a temporary
	 * on-stack one so the internal-abort wait below (step 4) has a
	 * completion to block on.  It is detached again at "out:" before
	 * this frame goes away.
	 */
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;
		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		/*
		 * NOTE(review): the internal firmware abort's return value
		 * is ignored here; rc reflects only the TMF outcome.
		 */
		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;

			/* 1. Set Device state as Recovery (0x03) */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x03);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			/* Assume timeout until the reset handler says otherwise. */
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				/* Request never went out: unhook both
				 * completions so no one signals stack memory.
				 */
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
					PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
						PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					/* Port reset timed out: treat the
					 * device as gone and bail out with
					 * rc still TMF_RESP_FUNC_FAILED.
					 */
					pm8001_dev_gone_notify(dev);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 1, tag);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational (0x01) */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion);
		} else {
			rc = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 0, tag);
		}
		/* Reached only when no step above jumped to "out:". */
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);

	}
out:
	/* Detach the on-stack slow_task (if we installed it) before return. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_printk("rc= %d\n", rc);
	return rc;
}
1324*4882a593Smuzhiyun 
/**
 * pm8001_abort_task_set() - send an SSP ABORT TASK SET TMF to a LUN
 * @dev: domain device the task set belongs to
 * @lun: LUN the TMF is addressed to
 *
 * Zero-initialize the TMF descriptor so members this TMF does not use
 * (e.g. tag_of_task_to_be_managed) are not indeterminate stack bytes
 * when the struct is handed to pm8001_issue_ssp_tmf().
 *
 * Return: result of pm8001_issue_ssp_tmf() (a TMF_RESP_FUNC_* value).
 */
int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task = { .tmf = TMF_ABORT_TASK_SET };

	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
1332*4882a593Smuzhiyun 
/**
 * pm8001_clear_aca() - send an SSP CLEAR ACA TMF to a LUN
 * @dev: domain device the ACA condition belongs to
 * @lun: LUN the TMF is addressed to
 *
 * Zero-initialize the TMF descriptor so members this TMF does not use
 * are not indeterminate stack bytes when the struct is handed to
 * pm8001_issue_ssp_tmf().
 *
 * Return: result of pm8001_issue_ssp_tmf() (a TMF_RESP_FUNC_* value).
 */
int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task = { .tmf = TMF_CLEAR_ACA };

	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
1340*4882a593Smuzhiyun 
/**
 * pm8001_clear_task_set() - send an SSP CLEAR TASK SET TMF to a LUN
 * @dev: domain device the task set belongs to
 * @lun: LUN the TMF is addressed to
 *
 * Zero-initialize the TMF descriptor so members this TMF does not use
 * are not indeterminate stack bytes when the struct is handed to
 * pm8001_issue_ssp_tmf().
 *
 * Return: result of pm8001_issue_ssp_tmf() (a TMF_RESP_FUNC_* value).
 */
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task = { .tmf = TMF_CLEAR_TASK_SET };
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		   pm8001_dev->device_id);
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
1352*4882a593Smuzhiyun 
1353