xref: /OK3568_Linux_fs/kernel/drivers/scsi/isci/port.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * This file is provided under a dual BSD/GPLv2 license.  When using or
3*4882a593Smuzhiyun  * redistributing this file, you may do so under either license.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * GPL LICENSE SUMMARY
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun  * it under the terms of version 2 of the GNU General Public License as
11*4882a593Smuzhiyun  * published by the Free Software Foundation.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful, but
14*4882a593Smuzhiyun  * WITHOUT ANY WARRANTY; without even the implied warranty of
15*4882a593Smuzhiyun  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16*4882a593Smuzhiyun  * General Public License for more details.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * You should have received a copy of the GNU General Public License
19*4882a593Smuzhiyun  * along with this program; if not, write to the Free Software
20*4882a593Smuzhiyun  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21*4882a593Smuzhiyun  * The full GNU General Public License is included in this distribution
22*4882a593Smuzhiyun  * in the file called LICENSE.GPL.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * BSD LICENSE
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27*4882a593Smuzhiyun  * All rights reserved.
28*4882a593Smuzhiyun  *
29*4882a593Smuzhiyun  * Redistribution and use in source and binary forms, with or without
30*4882a593Smuzhiyun  * modification, are permitted provided that the following conditions
31*4882a593Smuzhiyun  * are met:
32*4882a593Smuzhiyun  *
33*4882a593Smuzhiyun  *   * Redistributions of source code must retain the above copyright
34*4882a593Smuzhiyun  *     notice, this list of conditions and the following disclaimer.
35*4882a593Smuzhiyun  *   * Redistributions in binary form must reproduce the above copyright
36*4882a593Smuzhiyun  *     notice, this list of conditions and the following disclaimer in
37*4882a593Smuzhiyun  *     the documentation and/or other materials provided with the
38*4882a593Smuzhiyun  *     distribution.
39*4882a593Smuzhiyun  *   * Neither the name of Intel Corporation nor the names of its
40*4882a593Smuzhiyun  *     contributors may be used to endorse or promote products derived
41*4882a593Smuzhiyun  *     from this software without specific prior written permission.
42*4882a593Smuzhiyun  *
43*4882a593Smuzhiyun  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44*4882a593Smuzhiyun  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45*4882a593Smuzhiyun  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46*4882a593Smuzhiyun  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47*4882a593Smuzhiyun  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48*4882a593Smuzhiyun  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49*4882a593Smuzhiyun  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50*4882a593Smuzhiyun  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51*4882a593Smuzhiyun  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52*4882a593Smuzhiyun  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53*4882a593Smuzhiyun  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54*4882a593Smuzhiyun  */
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun #include "isci.h"
57*4882a593Smuzhiyun #include "port.h"
58*4882a593Smuzhiyun #include "request.h"
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
61*4882a593Smuzhiyun #define SCU_DUMMY_INDEX    (0xFFFF)
62*4882a593Smuzhiyun 
#undef C
#define C(a) (#a)
/*
 * port_state_name() - return the printable name for a port state.
 * @state: port state machine value; used directly as an array index.
 *
 * The C() macro stringifies each entry, so expanding PORT_STATES here
 * yields an array of state-name strings (presumably PORT_STATES is a
 * list of C(state) entries shared with the enum definition — the enum
 * itself lives in the header).  No bounds check: callers must pass a
 * valid enum sci_port_states value.
 */
const char *port_state_name(enum sci_port_states state)
{
	static const char * const strings[] = PORT_STATES;

	return strings[state];
}
#undef C
72*4882a593Smuzhiyun 
/*
 * sciport_to_dev() - recover the PCI device backing a port, for dev_*()
 * logging.
 *
 * The port's index within its owning host's ports[] array is subtracted
 * from the port pointer to land on &ihost->ports[0]; container_of() then
 * recovers the host.  This only works because ports are embedded in the
 * host structure at their physical_port_index position.
 */
static struct device *sciport_to_dev(struct isci_port *iport)
{
	int i = iport->physical_port_index;
	struct isci_port *table;
	struct isci_host *ihost;

	/* The dummy port carries a sentinel index, not its array slot;
	 * map it back to the extra entry past the real ports.
	 * NOTE(review): assumes the dummy port is stored at
	 * ports[SCI_MAX_PORTS+1] — confirm against the ports[] declaration
	 * in the host structure.
	 */
	if (i == SCIC_SDS_DUMMY_PORT)
		i = SCI_MAX_PORTS+1;

	table = iport - i;		/* &ihost->ports[0] */
	ihost = container_of(table, typeof(*ihost), ports[0]);

	return &ihost->pdev->dev;
}
87*4882a593Smuzhiyun 
sci_port_get_protocols(struct isci_port * iport,struct sci_phy_proto * proto)88*4882a593Smuzhiyun static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun 	u8 index;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	proto->all = 0;
93*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++) {
94*4882a593Smuzhiyun 		struct isci_phy *iphy = iport->phy_table[index];
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 		if (!iphy)
97*4882a593Smuzhiyun 			continue;
98*4882a593Smuzhiyun 		sci_phy_get_protocols(iphy, proto);
99*4882a593Smuzhiyun 	}
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun 
sci_port_get_phys(struct isci_port * iport)102*4882a593Smuzhiyun static u32 sci_port_get_phys(struct isci_port *iport)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	u32 index;
105*4882a593Smuzhiyun 	u32 mask;
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	mask = 0;
108*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++)
109*4882a593Smuzhiyun 		if (iport->phy_table[index])
110*4882a593Smuzhiyun 			mask |= (1 << index);
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return mask;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /**
116*4882a593Smuzhiyun  * sci_port_get_properties() - This method simply returns the properties
117*4882a593Smuzhiyun  *    regarding the port, such as: physical index, protocols, sas address, etc.
118*4882a593Smuzhiyun  * @port: this parameter specifies the port for which to retrieve the physical
119*4882a593Smuzhiyun  *    index.
120*4882a593Smuzhiyun  * @properties: This parameter specifies the properties structure into which to
121*4882a593Smuzhiyun  *    copy the requested information.
122*4882a593Smuzhiyun  *
123*4882a593Smuzhiyun  * Indicate if the user specified a valid port. SCI_SUCCESS This value is
124*4882a593Smuzhiyun  * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
125*4882a593Smuzhiyun  * value is returned if the specified port is not valid.  When this value is
126*4882a593Smuzhiyun  * returned, no data is copied to the properties output parameter.
127*4882a593Smuzhiyun  */
sci_port_get_properties(struct isci_port * iport,struct sci_port_properties * prop)128*4882a593Smuzhiyun enum sci_status sci_port_get_properties(struct isci_port *iport,
129*4882a593Smuzhiyun 						struct sci_port_properties *prop)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
132*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_PORT;
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	prop->index = iport->logical_port_index;
135*4882a593Smuzhiyun 	prop->phy_mask = sci_port_get_phys(iport);
136*4882a593Smuzhiyun 	sci_port_get_sas_address(iport, &prop->local.sas_address);
137*4882a593Smuzhiyun 	sci_port_get_protocols(iport, &prop->local.protocols);
138*4882a593Smuzhiyun 	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	return SCI_SUCCESS;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun 
/*
 * sci_port_bcn_enable() - re-arm broadcast-change-notification reception
 * on every phy in the port.
 */
static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		/* Read the link layer control register and write the same
		 * value straight back.
		 */
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* clear the bit by writing 1. */
		/* NOTE(review): this relies on a write-1-to-clear (RW1C)
		 * bit already being set in the value just read; presumably
		 * the pending BCN bit — confirm against the SCU link layer
		 * register definition.
		 */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}
158*4882a593Smuzhiyun 
/*
 * isci_port_bc_change_received() - handle a BROADCAST(CHANGE) primitive
 * received on @iphy: forward the event to libsas and then re-arm BCN
 * reception on all phys in @iport.
 *
 * Runs in atomic context (GFP_ATOMIC is used for the notification).
 */
static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_phy = %p, sas_phy = %p\n",
		__func__, iphy, &iphy->sas_phy);

	/* Let libsas schedule discovery/revalidation for this port. */
	sas_notify_port_event_gfp(&iphy->sas_phy,
				  PORTE_BROADCAST_RCVD, GFP_ATOMIC);
	/* Re-enable broadcast change notifications on the port's phys. */
	sci_port_bcn_enable(iport);
}
171*4882a593Smuzhiyun 
isci_port_link_up(struct isci_host * isci_host,struct isci_port * iport,struct isci_phy * iphy)172*4882a593Smuzhiyun static void isci_port_link_up(struct isci_host *isci_host,
173*4882a593Smuzhiyun 			      struct isci_port *iport,
174*4882a593Smuzhiyun 			      struct isci_phy *iphy)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	unsigned long flags;
177*4882a593Smuzhiyun 	struct sci_port_properties properties;
178*4882a593Smuzhiyun 	unsigned long success = true;
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	dev_dbg(&isci_host->pdev->dev,
181*4882a593Smuzhiyun 		"%s: isci_port = %p\n",
182*4882a593Smuzhiyun 		__func__, iport);
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	sci_port_get_properties(iport, &properties);
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	if (iphy->protocol == SAS_PROTOCOL_SATA) {
189*4882a593Smuzhiyun 		u64 attached_sas_address;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
192*4882a593Smuzhiyun 		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 		/*
195*4882a593Smuzhiyun 		 * For direct-attached SATA devices, the SCI core will
196*4882a593Smuzhiyun 		 * automagically assign a SAS address to the end device
197*4882a593Smuzhiyun 		 * for the purpose of creating a port. This SAS address
198*4882a593Smuzhiyun 		 * will not be the same as assigned to the PHY and needs
199*4882a593Smuzhiyun 		 * to be obtained from struct sci_port_properties properties.
200*4882a593Smuzhiyun 		 */
201*4882a593Smuzhiyun 		attached_sas_address = properties.remote.sas_address.high;
202*4882a593Smuzhiyun 		attached_sas_address <<= 32;
203*4882a593Smuzhiyun 		attached_sas_address |= properties.remote.sas_address.low;
204*4882a593Smuzhiyun 		swab64s(&attached_sas_address);
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 		memcpy(&iphy->sas_phy.attached_sas_addr,
207*4882a593Smuzhiyun 		       &attached_sas_address, sizeof(attached_sas_address));
208*4882a593Smuzhiyun 	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
209*4882a593Smuzhiyun 		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
210*4882a593Smuzhiyun 		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 		/* Copy the attached SAS address from the IAF */
213*4882a593Smuzhiyun 		memcpy(iphy->sas_phy.attached_sas_addr,
214*4882a593Smuzhiyun 		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
215*4882a593Smuzhiyun 	} else {
216*4882a593Smuzhiyun 		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
217*4882a593Smuzhiyun 		success = false;
218*4882a593Smuzhiyun 	}
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	/* Notify libsas that we have an address frame, if indeed
225*4882a593Smuzhiyun 	 * we've found an SSP, SMP, or STP target */
226*4882a593Smuzhiyun 	if (success)
227*4882a593Smuzhiyun 		sas_notify_port_event_gfp(&iphy->sas_phy,
228*4882a593Smuzhiyun 					  PORTE_BYTES_DMAED, GFP_ATOMIC);
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun /**
233*4882a593Smuzhiyun  * isci_port_link_down() - This function is called by the sci core when a link
234*4882a593Smuzhiyun  *    becomes inactive.
235*4882a593Smuzhiyun  * @isci_host: This parameter specifies the isci host object.
236*4882a593Smuzhiyun  * @phy: This parameter specifies the isci phy with the active link.
237*4882a593Smuzhiyun  * @port: This parameter specifies the isci port with the active link.
238*4882a593Smuzhiyun  *
239*4882a593Smuzhiyun  */
isci_port_link_down(struct isci_host * isci_host,struct isci_phy * isci_phy,struct isci_port * isci_port)240*4882a593Smuzhiyun static void isci_port_link_down(struct isci_host *isci_host,
241*4882a593Smuzhiyun 				struct isci_phy *isci_phy,
242*4882a593Smuzhiyun 				struct isci_port *isci_port)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun 	struct isci_remote_device *isci_device;
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	dev_dbg(&isci_host->pdev->dev,
247*4882a593Smuzhiyun 		"%s: isci_port = %p\n", __func__, isci_port);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	if (isci_port) {
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 		/* check to see if this is the last phy on this port. */
252*4882a593Smuzhiyun 		if (isci_phy->sas_phy.port &&
253*4882a593Smuzhiyun 		    isci_phy->sas_phy.port->num_phys == 1) {
254*4882a593Smuzhiyun 			/* change the state for all devices on this port.  The
255*4882a593Smuzhiyun 			* next task sent to this device will be returned as
256*4882a593Smuzhiyun 			* SAS_TASK_UNDELIVERED, and the scsi mid layer will
257*4882a593Smuzhiyun 			* remove the target
258*4882a593Smuzhiyun 			*/
259*4882a593Smuzhiyun 			list_for_each_entry(isci_device,
260*4882a593Smuzhiyun 					    &isci_port->remote_dev_list,
261*4882a593Smuzhiyun 					    node) {
262*4882a593Smuzhiyun 				dev_dbg(&isci_host->pdev->dev,
263*4882a593Smuzhiyun 					"%s: isci_device = %p\n",
264*4882a593Smuzhiyun 					__func__, isci_device);
265*4882a593Smuzhiyun 				set_bit(IDEV_GONE, &isci_device->flags);
266*4882a593Smuzhiyun 			}
267*4882a593Smuzhiyun 		}
268*4882a593Smuzhiyun 	}
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	/* Notify libsas of the borken link, this will trigger calls to our
271*4882a593Smuzhiyun 	 * isci_port_deformed and isci_dev_gone functions.
272*4882a593Smuzhiyun 	 */
273*4882a593Smuzhiyun 	sas_phy_disconnected(&isci_phy->sas_phy);
274*4882a593Smuzhiyun 	sas_notify_phy_event_gfp(&isci_phy->sas_phy,
275*4882a593Smuzhiyun 				 PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	dev_dbg(&isci_host->pdev->dev,
278*4882a593Smuzhiyun 		"%s: isci_port = %p - Done\n", __func__, isci_port);
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun 
is_port_ready_state(enum sci_port_states state)281*4882a593Smuzhiyun static bool is_port_ready_state(enum sci_port_states state)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	switch (state) {
284*4882a593Smuzhiyun 	case SCI_PORT_READY:
285*4882a593Smuzhiyun 	case SCI_PORT_SUB_WAITING:
286*4882a593Smuzhiyun 	case SCI_PORT_SUB_OPERATIONAL:
287*4882a593Smuzhiyun 	case SCI_PORT_SUB_CONFIGURING:
288*4882a593Smuzhiyun 		return true;
289*4882a593Smuzhiyun 	default:
290*4882a593Smuzhiyun 		return false;
291*4882a593Smuzhiyun 	}
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /* flag dummy rnc hanling when exiting a ready state */
port_state_machine_change(struct isci_port * iport,enum sci_port_states state)295*4882a593Smuzhiyun static void port_state_machine_change(struct isci_port *iport,
296*4882a593Smuzhiyun 				      enum sci_port_states state)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	struct sci_base_state_machine *sm = &iport->sm;
299*4882a593Smuzhiyun 	enum sci_port_states old_state = sm->current_state_id;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
302*4882a593Smuzhiyun 		iport->ready_exit = true;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	sci_change_state(sm, state);
305*4882a593Smuzhiyun 	iport->ready_exit = false;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun /**
309*4882a593Smuzhiyun  * isci_port_hard_reset_complete() - This function is called by the sci core
310*4882a593Smuzhiyun  *    when the hard reset complete notification has been received.
311*4882a593Smuzhiyun  * @port: This parameter specifies the sci port with the active link.
312*4882a593Smuzhiyun  * @completion_status: This parameter specifies the core status for the reset
313*4882a593Smuzhiyun  *    process.
314*4882a593Smuzhiyun  *
315*4882a593Smuzhiyun  */
isci_port_hard_reset_complete(struct isci_port * isci_port,enum sci_status completion_status)316*4882a593Smuzhiyun static void isci_port_hard_reset_complete(struct isci_port *isci_port,
317*4882a593Smuzhiyun 					  enum sci_status completion_status)
318*4882a593Smuzhiyun {
319*4882a593Smuzhiyun 	struct isci_host *ihost = isci_port->owning_controller;
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	dev_dbg(&ihost->pdev->dev,
322*4882a593Smuzhiyun 		"%s: isci_port = %p, completion_status=%x\n",
323*4882a593Smuzhiyun 		     __func__, isci_port, completion_status);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	/* Save the status of the hard reset from the port. */
326*4882a593Smuzhiyun 	isci_port->hard_reset_status = completion_status;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	if (completion_status != SCI_SUCCESS) {
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 		/* The reset failed.  The port state is now SCI_PORT_FAILED. */
331*4882a593Smuzhiyun 		if (isci_port->active_phy_mask == 0) {
332*4882a593Smuzhiyun 			int phy_idx = isci_port->last_active_phy;
333*4882a593Smuzhiyun 			struct isci_phy *iphy = &ihost->phys[phy_idx];
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 			/* Generate the link down now to the host, since it
336*4882a593Smuzhiyun 			 * was intercepted by the hard reset state machine when
337*4882a593Smuzhiyun 			 * it really happened.
338*4882a593Smuzhiyun 			 */
339*4882a593Smuzhiyun 			isci_port_link_down(ihost, iphy, isci_port);
340*4882a593Smuzhiyun 		}
341*4882a593Smuzhiyun 		/* Advance the port state so that link state changes will be
342*4882a593Smuzhiyun 		 * noticed.
343*4882a593Smuzhiyun 		 */
344*4882a593Smuzhiyun 		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	}
347*4882a593Smuzhiyun 	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
348*4882a593Smuzhiyun 	wake_up(&ihost->eventq);
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun /* This method will return a true value if the specified phy can be assigned to
353*4882a593Smuzhiyun  * this port The following is a list of phys for each port that are allowed: -
354*4882a593Smuzhiyun  * Port 0 - 3 2 1 0 - Port 1 -     1 - Port 2 - 3 2 - Port 3 - 3 This method
355*4882a593Smuzhiyun  * doesn't preclude all configurations.  It merely ensures that a phy is part
356*4882a593Smuzhiyun  * of the allowable set of phy identifiers for that port.  For example, one
357*4882a593Smuzhiyun  * could assign phy 3 to port 0 and no other phys.  Please refer to
358*4882a593Smuzhiyun  * sci_port_is_phy_mask_valid() for information regarding whether the
359*4882a593Smuzhiyun  * phy_mask for a port can be supported. bool true if this is a valid phy
360*4882a593Smuzhiyun  * assignment for the port false if this is not a valid phy assignment for the
361*4882a593Smuzhiyun  * port
362*4882a593Smuzhiyun  */
sci_port_is_valid_phy_assignment(struct isci_port * iport,u32 phy_index)363*4882a593Smuzhiyun bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
366*4882a593Smuzhiyun 	struct sci_user_parameters *user = &ihost->user_parameters;
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	/* Initialize to invalid value. */
369*4882a593Smuzhiyun 	u32 existing_phy_index = SCI_MAX_PHYS;
370*4882a593Smuzhiyun 	u32 index;
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	if ((iport->physical_port_index == 1) && (phy_index != 1))
373*4882a593Smuzhiyun 		return false;
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	if (iport->physical_port_index == 3 && phy_index != 3)
376*4882a593Smuzhiyun 		return false;
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	if (iport->physical_port_index == 2 &&
379*4882a593Smuzhiyun 	    (phy_index == 0 || phy_index == 1))
380*4882a593Smuzhiyun 		return false;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++)
383*4882a593Smuzhiyun 		if (iport->phy_table[index] && index != phy_index)
384*4882a593Smuzhiyun 			existing_phy_index = index;
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun 	/* Ensure that all of the phys in the port are capable of
387*4882a593Smuzhiyun 	 * operating at the same maximum link rate.
388*4882a593Smuzhiyun 	 */
389*4882a593Smuzhiyun 	if (existing_phy_index < SCI_MAX_PHYS &&
390*4882a593Smuzhiyun 	    user->phys[phy_index].max_speed_generation !=
391*4882a593Smuzhiyun 	    user->phys[existing_phy_index].max_speed_generation)
392*4882a593Smuzhiyun 		return false;
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 	return true;
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun /**
398*4882a593Smuzhiyun  *
399*4882a593Smuzhiyun  * @sci_port: This is the port object for which to determine if the phy mask
400*4882a593Smuzhiyun  *    can be supported.
401*4882a593Smuzhiyun  *
402*4882a593Smuzhiyun  * This method will return a true value if the port's phy mask can be supported
403*4882a593Smuzhiyun  * by the SCU. The following is a list of valid PHY mask configurations for
404*4882a593Smuzhiyun  * each port: - Port 0 - [[3  2] 1] 0 - Port 1 -        [1] - Port 2 - [[3] 2]
405*4882a593Smuzhiyun  * - Port 3 -  [3] This method returns a boolean indication specifying if the
406*4882a593Smuzhiyun  * phy mask can be supported. true if this is a valid phy assignment for the
407*4882a593Smuzhiyun  * port false if this is not a valid phy assignment for the port
408*4882a593Smuzhiyun  */
sci_port_is_phy_mask_valid(struct isci_port * iport,u32 phy_mask)409*4882a593Smuzhiyun static bool sci_port_is_phy_mask_valid(
410*4882a593Smuzhiyun 	struct isci_port *iport,
411*4882a593Smuzhiyun 	u32 phy_mask)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun 	if (iport->physical_port_index == 0) {
414*4882a593Smuzhiyun 		if (((phy_mask & 0x0F) == 0x0F)
415*4882a593Smuzhiyun 		    || ((phy_mask & 0x03) == 0x03)
416*4882a593Smuzhiyun 		    || ((phy_mask & 0x01) == 0x01)
417*4882a593Smuzhiyun 		    || (phy_mask == 0))
418*4882a593Smuzhiyun 			return true;
419*4882a593Smuzhiyun 	} else if (iport->physical_port_index == 1) {
420*4882a593Smuzhiyun 		if (((phy_mask & 0x02) == 0x02)
421*4882a593Smuzhiyun 		    || (phy_mask == 0))
422*4882a593Smuzhiyun 			return true;
423*4882a593Smuzhiyun 	} else if (iport->physical_port_index == 2) {
424*4882a593Smuzhiyun 		if (((phy_mask & 0x0C) == 0x0C)
425*4882a593Smuzhiyun 		    || ((phy_mask & 0x04) == 0x04)
426*4882a593Smuzhiyun 		    || (phy_mask == 0))
427*4882a593Smuzhiyun 			return true;
428*4882a593Smuzhiyun 	} else if (iport->physical_port_index == 3) {
429*4882a593Smuzhiyun 		if (((phy_mask & 0x08) == 0x08)
430*4882a593Smuzhiyun 		    || (phy_mask == 0))
431*4882a593Smuzhiyun 			return true;
432*4882a593Smuzhiyun 	}
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 	return false;
435*4882a593Smuzhiyun }
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun /*
438*4882a593Smuzhiyun  * This method retrieves a currently active (i.e. connected) phy contained in
439*4882a593Smuzhiyun  * the port.  Currently, the lowest order phy that is connected is returned.
440*4882a593Smuzhiyun  * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is
441*4882a593Smuzhiyun  * returned if there are no currently active (i.e. connected to a remote end
442*4882a593Smuzhiyun  * point) phys contained in the port. All other values specify a struct sci_phy
443*4882a593Smuzhiyun  * object that is active in the port.
444*4882a593Smuzhiyun  */
sci_port_get_a_connected_phy(struct isci_port * iport)445*4882a593Smuzhiyun static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
446*4882a593Smuzhiyun {
447*4882a593Smuzhiyun 	u32 index;
448*4882a593Smuzhiyun 	struct isci_phy *iphy;
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++) {
451*4882a593Smuzhiyun 		/* Ensure that the phy is both part of the port and currently
452*4882a593Smuzhiyun 		 * connected to the remote end-point.
453*4882a593Smuzhiyun 		 */
454*4882a593Smuzhiyun 		iphy = iport->phy_table[index];
455*4882a593Smuzhiyun 		if (iphy && sci_port_active_phy(iport, iphy))
456*4882a593Smuzhiyun 			return iphy;
457*4882a593Smuzhiyun 	}
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	return NULL;
460*4882a593Smuzhiyun }
461*4882a593Smuzhiyun 
sci_port_set_phy(struct isci_port * iport,struct isci_phy * iphy)462*4882a593Smuzhiyun static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
463*4882a593Smuzhiyun {
464*4882a593Smuzhiyun 	/* Check to see if we can add this phy to a port
465*4882a593Smuzhiyun 	 * that means that the phy is not part of a port and that the port does
466*4882a593Smuzhiyun 	 * not already have a phy assinged to the phy index.
467*4882a593Smuzhiyun 	 */
468*4882a593Smuzhiyun 	if (!iport->phy_table[iphy->phy_index] &&
469*4882a593Smuzhiyun 	    !phy_get_non_dummy_port(iphy) &&
470*4882a593Smuzhiyun 	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
471*4882a593Smuzhiyun 		/* Phy is being added in the stopped state so we are in MPC mode
472*4882a593Smuzhiyun 		 * make logical port index = physical port index
473*4882a593Smuzhiyun 		 */
474*4882a593Smuzhiyun 		iport->logical_port_index = iport->physical_port_index;
475*4882a593Smuzhiyun 		iport->phy_table[iphy->phy_index] = iphy;
476*4882a593Smuzhiyun 		sci_phy_set_port(iphy, iport);
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 		return SCI_SUCCESS;
479*4882a593Smuzhiyun 	}
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	return SCI_FAILURE;
482*4882a593Smuzhiyun }
483*4882a593Smuzhiyun 
sci_port_clear_phy(struct isci_port * iport,struct isci_phy * iphy)484*4882a593Smuzhiyun static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
485*4882a593Smuzhiyun {
486*4882a593Smuzhiyun 	/* Make sure that this phy is part of this port */
487*4882a593Smuzhiyun 	if (iport->phy_table[iphy->phy_index] == iphy &&
488*4882a593Smuzhiyun 	    phy_get_non_dummy_port(iphy) == iport) {
489*4882a593Smuzhiyun 		struct isci_host *ihost = iport->owning_controller;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 		/* Yep it is assigned to this port so remove it */
492*4882a593Smuzhiyun 		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
493*4882a593Smuzhiyun 		iport->phy_table[iphy->phy_index] = NULL;
494*4882a593Smuzhiyun 		return SCI_SUCCESS;
495*4882a593Smuzhiyun 	}
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	return SCI_FAILURE;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun 
sci_port_get_sas_address(struct isci_port * iport,struct sci_sas_address * sas)500*4882a593Smuzhiyun void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
501*4882a593Smuzhiyun {
502*4882a593Smuzhiyun 	u32 index;
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	sas->high = 0;
505*4882a593Smuzhiyun 	sas->low  = 0;
506*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++)
507*4882a593Smuzhiyun 		if (iport->phy_table[index])
508*4882a593Smuzhiyun 			sci_phy_get_sas_address(iport->phy_table[index], sas);
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun 
sci_port_get_attached_sas_address(struct isci_port * iport,struct sci_sas_address * sas)511*4882a593Smuzhiyun void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun 	struct isci_phy *iphy;
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	/*
516*4882a593Smuzhiyun 	 * Ensure that the phy is both part of the port and currently
517*4882a593Smuzhiyun 	 * connected to the remote end-point.
518*4882a593Smuzhiyun 	 */
519*4882a593Smuzhiyun 	iphy = sci_port_get_a_connected_phy(iport);
520*4882a593Smuzhiyun 	if (iphy) {
521*4882a593Smuzhiyun 		if (iphy->protocol != SAS_PROTOCOL_SATA) {
522*4882a593Smuzhiyun 			sci_phy_get_attached_sas_address(iphy, sas);
523*4882a593Smuzhiyun 		} else {
524*4882a593Smuzhiyun 			sci_phy_get_sas_address(iphy, sas);
525*4882a593Smuzhiyun 			sas->low += iphy->phy_index;
526*4882a593Smuzhiyun 		}
527*4882a593Smuzhiyun 	} else {
528*4882a593Smuzhiyun 		sas->high = 0;
529*4882a593Smuzhiyun 		sas->low  = 0;
530*4882a593Smuzhiyun 	}
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun /**
534*4882a593Smuzhiyun  * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
535*4882a593Smuzhiyun  *
536*4882a593Smuzhiyun  * @sci_port: logical port on which we need to create the remote node context
537*4882a593Smuzhiyun  * @rni: remote node index for this remote node context.
538*4882a593Smuzhiyun  *
539*4882a593Smuzhiyun  * This routine will construct a dummy remote node context data structure
540*4882a593Smuzhiyun  * This structure will be posted to the hardware to work around a scheduler
541*4882a593Smuzhiyun  * error in the hardware.
542*4882a593Smuzhiyun  */
sci_port_construct_dummy_rnc(struct isci_port * iport,u16 rni)543*4882a593Smuzhiyun static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun 	union scu_remote_node_context *rnc;
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 	rnc = &iport->owning_controller->remote_node_context_table[rni];
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 	memset(rnc, 0, sizeof(union scu_remote_node_context));
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	rnc->ssp.remote_sas_address_hi = 0;
552*4882a593Smuzhiyun 	rnc->ssp.remote_sas_address_lo = 0;
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	rnc->ssp.remote_node_index = rni;
555*4882a593Smuzhiyun 	rnc->ssp.remote_node_port_width = 1;
556*4882a593Smuzhiyun 	rnc->ssp.logical_port_index = iport->physical_port_index;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	rnc->ssp.nexus_loss_timer_enable = false;
559*4882a593Smuzhiyun 	rnc->ssp.check_bit = false;
560*4882a593Smuzhiyun 	rnc->ssp.is_valid = true;
561*4882a593Smuzhiyun 	rnc->ssp.is_remote_node_context = true;
562*4882a593Smuzhiyun 	rnc->ssp.function_number = 0;
563*4882a593Smuzhiyun 	rnc->ssp.arbitration_wait_time = 0;
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun /*
567*4882a593Smuzhiyun  * construct a dummy task context data structure.  This
568*4882a593Smuzhiyun  * structure will be posted to the hardwre to work around a scheduler error
569*4882a593Smuzhiyun  * in the hardware.
570*4882a593Smuzhiyun  */
sci_port_construct_dummy_task(struct isci_port * iport,u16 tag)571*4882a593Smuzhiyun static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
574*4882a593Smuzhiyun 	struct scu_task_context *task_context;
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
577*4882a593Smuzhiyun 	memset(task_context, 0, sizeof(struct scu_task_context));
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	task_context->initiator_request = 1;
580*4882a593Smuzhiyun 	task_context->connection_rate = 1;
581*4882a593Smuzhiyun 	task_context->logical_port_index = iport->physical_port_index;
582*4882a593Smuzhiyun 	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
583*4882a593Smuzhiyun 	task_context->task_index = ISCI_TAG_TCI(tag);
584*4882a593Smuzhiyun 	task_context->valid = SCU_TASK_CONTEXT_VALID;
585*4882a593Smuzhiyun 	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
586*4882a593Smuzhiyun 	task_context->remote_node_index = iport->reserved_rni;
587*4882a593Smuzhiyun 	task_context->do_not_dma_ssp_good_response = 1;
588*4882a593Smuzhiyun 	task_context->task_phase = 0x01;
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
sci_port_destroy_dummy_resources(struct isci_port * iport)591*4882a593Smuzhiyun static void sci_port_destroy_dummy_resources(struct isci_port *iport)
592*4882a593Smuzhiyun {
593*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
596*4882a593Smuzhiyun 		isci_free_tag(ihost, iport->reserved_tag);
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 	if (iport->reserved_rni != SCU_DUMMY_INDEX)
599*4882a593Smuzhiyun 		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
600*4882a593Smuzhiyun 								     1, iport->reserved_rni);
601*4882a593Smuzhiyun 
602*4882a593Smuzhiyun 	iport->reserved_rni = SCU_DUMMY_INDEX;
603*4882a593Smuzhiyun 	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
604*4882a593Smuzhiyun }
605*4882a593Smuzhiyun 
/* Program @device_id into the transport layer of every active phy. */
void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
	u8 i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		if (!(iport->active_phy_mask & (1 << i)))
			continue;
		sci_phy_setup_transport(iport->phy_table[i], device_id);
	}
}
615*4882a593Smuzhiyun 
sci_port_resume_phy(struct isci_port * iport,struct isci_phy * iphy)616*4882a593Smuzhiyun static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
617*4882a593Smuzhiyun {
618*4882a593Smuzhiyun 	sci_phy_resume(iphy);
619*4882a593Smuzhiyun 	iport->enabled_phy_mask |= 1 << iphy->phy_index;
620*4882a593Smuzhiyun }
621*4882a593Smuzhiyun 
/*
 * Add @iphy to the port's active set.  Non-SATA phys are resumed
 * immediately when PF_RESUME is set; PF_NOTIFY triggers a link-up
 * notification to the upper layer.
 */
static void sci_port_activate_phy(struct isci_port *iport,
				  struct isci_phy *iphy,
				  u8 flags)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	/* The phy is good now; clear any invalid-phy record for it. */
	sci_controller_clear_invalid_phy(ihost, iphy);

	if (flags & PF_NOTIFY)
		isci_port_link_up(ihost, iport, iphy);
}
638*4882a593Smuzhiyun 
/* Remove @iphy from the port's active/enabled sets, optionally notifying
 * the upper layer of the link down.
 */
void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;
	u32 phy_bit = 1 << iphy->phy_index;

	iport->active_phy_mask &= ~phy_bit;
	iport->enabled_phy_mask &= ~phy_bit;
	/* Remember the last phy to leave an otherwise empty port. */
	if (!iport->active_phy_mask)
		iport->last_active_phy = iphy->phy_index;

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* Re-assign the phy back to the LP as if it were a narrow port for APC
	 * mode. For MPC mode, the phy will remain in the port.
	 */
	if (iport->owning_controller->oem_parameters.controller.mode_type ==
	    SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
		writel(iphy->phy_index,
		       &iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user)
		isci_port_link_down(ihost, iphy, iport);
}
662*4882a593Smuzhiyun 
sci_port_invalid_link_up(struct isci_port * iport,struct isci_phy * iphy)663*4882a593Smuzhiyun static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
664*4882a593Smuzhiyun {
665*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 	/*
668*4882a593Smuzhiyun 	 * Check to see if we have alreay reported this link as bad and if
669*4882a593Smuzhiyun 	 * not go ahead and tell the SCI_USER that we have discovered an
670*4882a593Smuzhiyun 	 * invalid link.
671*4882a593Smuzhiyun 	 */
672*4882a593Smuzhiyun 	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
673*4882a593Smuzhiyun 		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
674*4882a593Smuzhiyun 		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
675*4882a593Smuzhiyun 	}
676*4882a593Smuzhiyun }
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun /**
679*4882a593Smuzhiyun  * sci_port_general_link_up_handler - phy can be assigned to port?
680*4882a593Smuzhiyun  * @sci_port: sci_port object for which has a phy that has gone link up.
681*4882a593Smuzhiyun  * @sci_phy: This is the struct isci_phy object that has gone link up.
682*4882a593Smuzhiyun  * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
683*4882a593Smuzhiyun  *
684*4882a593Smuzhiyun  * Determine if this phy can be assigned to this port . If the phy is
685*4882a593Smuzhiyun  * not a valid PHY for this port then the function will notify the user.
686*4882a593Smuzhiyun  * A PHY can only be part of a port if it's attached SAS ADDRESS is the
687*4882a593Smuzhiyun  * same as all other PHYs in the same port.
688*4882a593Smuzhiyun  */
sci_port_general_link_up_handler(struct isci_port * iport,struct isci_phy * iphy,u8 flags)689*4882a593Smuzhiyun static void sci_port_general_link_up_handler(struct isci_port *iport,
690*4882a593Smuzhiyun 					     struct isci_phy *iphy,
691*4882a593Smuzhiyun 					     u8 flags)
692*4882a593Smuzhiyun {
693*4882a593Smuzhiyun 	struct sci_sas_address port_sas_address;
694*4882a593Smuzhiyun 	struct sci_sas_address phy_sas_address;
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	sci_port_get_attached_sas_address(iport, &port_sas_address);
697*4882a593Smuzhiyun 	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	/* If the SAS address of the new phy matches the SAS address of
700*4882a593Smuzhiyun 	 * other phys in the port OR this is the first phy in the port,
701*4882a593Smuzhiyun 	 * then activate the phy and allow it to be used for operations
702*4882a593Smuzhiyun 	 * in this port.
703*4882a593Smuzhiyun 	 */
704*4882a593Smuzhiyun 	if ((phy_sas_address.high == port_sas_address.high &&
705*4882a593Smuzhiyun 	     phy_sas_address.low  == port_sas_address.low) ||
706*4882a593Smuzhiyun 	    iport->active_phy_mask == 0) {
707*4882a593Smuzhiyun 		struct sci_base_state_machine *sm = &iport->sm;
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 		sci_port_activate_phy(iport, iphy, flags);
710*4882a593Smuzhiyun 		if (sm->current_state_id == SCI_PORT_RESETTING)
711*4882a593Smuzhiyun 			port_state_machine_change(iport, SCI_PORT_READY);
712*4882a593Smuzhiyun 	} else
713*4882a593Smuzhiyun 		sci_port_invalid_link_up(iport, iphy);
714*4882a593Smuzhiyun }
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun /**
719*4882a593Smuzhiyun  * This method returns false if the port only has a single phy object assigned.
720*4882a593Smuzhiyun  *     If there are no phys or more than one phy then the method will return
721*4882a593Smuzhiyun  *    true.
722*4882a593Smuzhiyun  * @sci_port: The port for which the wide port condition is to be checked.
723*4882a593Smuzhiyun  *
724*4882a593Smuzhiyun  * bool true Is returned if this is a wide ported port. false Is returned if
725*4882a593Smuzhiyun  * this is a narrow port.
726*4882a593Smuzhiyun  */
sci_port_is_wide(struct isci_port * iport)727*4882a593Smuzhiyun static bool sci_port_is_wide(struct isci_port *iport)
728*4882a593Smuzhiyun {
729*4882a593Smuzhiyun 	u32 index;
730*4882a593Smuzhiyun 	u32 phy_count = 0;
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++) {
733*4882a593Smuzhiyun 		if (iport->phy_table[index] != NULL) {
734*4882a593Smuzhiyun 			phy_count++;
735*4882a593Smuzhiyun 		}
736*4882a593Smuzhiyun 	}
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 	return phy_count != 1;
739*4882a593Smuzhiyun }
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun /**
742*4882a593Smuzhiyun  * This method is called by the PHY object when the link is detected. if the
743*4882a593Smuzhiyun  *    port wants the PHY to continue on to the link up state then the port
744*4882a593Smuzhiyun  *    layer must return true.  If the port object returns false the phy object
745*4882a593Smuzhiyun  *    must halt its attempt to go link up.
746*4882a593Smuzhiyun  * @sci_port: The port associated with the phy object.
747*4882a593Smuzhiyun  * @sci_phy: The phy object that is trying to go link up.
748*4882a593Smuzhiyun  *
749*4882a593Smuzhiyun  * true if the phy object can continue to the link up condition. true Is
750*4882a593Smuzhiyun  * returned if this phy can continue to the ready state. false Is returned if
751*4882a593Smuzhiyun  * can not continue on to the ready state. This notification is in place for
752*4882a593Smuzhiyun  * wide ports and direct attached phys.  Since there are no wide ported SATA
753*4882a593Smuzhiyun  * devices this could become an invalid port configuration.
754*4882a593Smuzhiyun  */
sci_port_link_detected(struct isci_port * iport,struct isci_phy * iphy)755*4882a593Smuzhiyun bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
756*4882a593Smuzhiyun {
757*4882a593Smuzhiyun 	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
758*4882a593Smuzhiyun 	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
759*4882a593Smuzhiyun 		if (sci_port_is_wide(iport)) {
760*4882a593Smuzhiyun 			sci_port_invalid_link_up(iport, iphy);
761*4882a593Smuzhiyun 			return false;
762*4882a593Smuzhiyun 		} else {
763*4882a593Smuzhiyun 			struct isci_host *ihost = iport->owning_controller;
764*4882a593Smuzhiyun 			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
765*4882a593Smuzhiyun 			writel(iphy->phy_index,
766*4882a593Smuzhiyun 			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
767*4882a593Smuzhiyun 		}
768*4882a593Smuzhiyun 	}
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	return true;
771*4882a593Smuzhiyun }
772*4882a593Smuzhiyun 
port_timeout(struct timer_list * t)773*4882a593Smuzhiyun static void port_timeout(struct timer_list *t)
774*4882a593Smuzhiyun {
775*4882a593Smuzhiyun 	struct sci_timer *tmr = from_timer(tmr, t, timer);
776*4882a593Smuzhiyun 	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
777*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
778*4882a593Smuzhiyun 	unsigned long flags;
779*4882a593Smuzhiyun 	u32 current_state;
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	spin_lock_irqsave(&ihost->scic_lock, flags);
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	if (tmr->cancel)
784*4882a593Smuzhiyun 		goto done;
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 	current_state = iport->sm.current_state_id;
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	if (current_state == SCI_PORT_RESETTING) {
789*4882a593Smuzhiyun 		/* if the port is still in the resetting state then the timeout
790*4882a593Smuzhiyun 		 * fired before the reset completed.
791*4882a593Smuzhiyun 		 */
792*4882a593Smuzhiyun 		port_state_machine_change(iport, SCI_PORT_FAILED);
793*4882a593Smuzhiyun 	} else if (current_state == SCI_PORT_STOPPED) {
794*4882a593Smuzhiyun 		/* if the port is stopped then the start request failed In this
795*4882a593Smuzhiyun 		 * case stay in the stopped state.
796*4882a593Smuzhiyun 		 */
797*4882a593Smuzhiyun 		dev_err(sciport_to_dev(iport),
798*4882a593Smuzhiyun 			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
799*4882a593Smuzhiyun 			__func__,
800*4882a593Smuzhiyun 			iport);
801*4882a593Smuzhiyun 	} else if (current_state == SCI_PORT_STOPPING) {
802*4882a593Smuzhiyun 		dev_dbg(sciport_to_dev(iport),
803*4882a593Smuzhiyun 			"%s: port%d: stop complete timeout\n",
804*4882a593Smuzhiyun 			__func__, iport->physical_port_index);
805*4882a593Smuzhiyun 	} else {
806*4882a593Smuzhiyun 		/* The port is in the ready state and we have a timer
807*4882a593Smuzhiyun 		 * reporting a timeout this should not happen.
808*4882a593Smuzhiyun 		 */
809*4882a593Smuzhiyun 		dev_err(sciport_to_dev(iport),
810*4882a593Smuzhiyun 			"%s: SCIC Port 0x%p is processing a timeout operation "
811*4882a593Smuzhiyun 			"in state %d.\n", __func__, iport, current_state);
812*4882a593Smuzhiyun 	}
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun done:
815*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
816*4882a593Smuzhiyun }
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun /* --------------------------------------------------------------------------- */
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun /**
821*4882a593Smuzhiyun  * This function updates the hardwares VIIT entry for this port.
822*4882a593Smuzhiyun  *
823*4882a593Smuzhiyun  *
824*4882a593Smuzhiyun  */
sci_port_update_viit_entry(struct isci_port * iport)825*4882a593Smuzhiyun static void sci_port_update_viit_entry(struct isci_port *iport)
826*4882a593Smuzhiyun {
827*4882a593Smuzhiyun 	struct sci_sas_address sas_address;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	sci_port_get_sas_address(iport, &sas_address);
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	writel(sas_address.high,
832*4882a593Smuzhiyun 		&iport->viit_registers->initiator_sas_address_hi);
833*4882a593Smuzhiyun 	writel(sas_address.low,
834*4882a593Smuzhiyun 		&iport->viit_registers->initiator_sas_address_lo);
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	/* This value get cleared just in case its not already cleared */
837*4882a593Smuzhiyun 	writel(0, &iport->viit_registers->reserved);
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	/* We are required to update the status register last */
840*4882a593Smuzhiyun 	writel(SCU_VIIT_ENTRY_ID_VIIT |
841*4882a593Smuzhiyun 	       SCU_VIIT_IPPT_INITIATOR |
842*4882a593Smuzhiyun 	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
843*4882a593Smuzhiyun 	       SCU_VIIT_STATUS_ALL_VALID,
844*4882a593Smuzhiyun 	       &iport->viit_registers->status);
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun 
sci_port_get_max_allowed_speed(struct isci_port * iport)847*4882a593Smuzhiyun enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
848*4882a593Smuzhiyun {
849*4882a593Smuzhiyun 	u16 index;
850*4882a593Smuzhiyun 	struct isci_phy *iphy;
851*4882a593Smuzhiyun 	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun 	/*
854*4882a593Smuzhiyun 	 * Loop through all of the phys in this port and find the phy with the
855*4882a593Smuzhiyun 	 * lowest maximum link rate. */
856*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++) {
857*4882a593Smuzhiyun 		iphy = iport->phy_table[index];
858*4882a593Smuzhiyun 		if (iphy && sci_port_active_phy(iport, iphy) &&
859*4882a593Smuzhiyun 		    iphy->max_negotiated_speed < max_allowed_speed)
860*4882a593Smuzhiyun 			max_allowed_speed = iphy->max_negotiated_speed;
861*4882a593Smuzhiyun 	}
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	return max_allowed_speed;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun 
sci_port_suspend_port_task_scheduler(struct isci_port * iport)866*4882a593Smuzhiyun static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
867*4882a593Smuzhiyun {
868*4882a593Smuzhiyun 	u32 pts_control_value;
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
871*4882a593Smuzhiyun 	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
872*4882a593Smuzhiyun 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun /**
876*4882a593Smuzhiyun  * sci_port_post_dummy_request() - post dummy/workaround request
877*4882a593Smuzhiyun  * @sci_port: port to post task
878*4882a593Smuzhiyun  *
879*4882a593Smuzhiyun  * Prevent the hardware scheduler from posting new requests to the front
880*4882a593Smuzhiyun  * of the scheduler queue causing a starvation problem for currently
881*4882a593Smuzhiyun  * ongoing requests.
882*4882a593Smuzhiyun  *
883*4882a593Smuzhiyun  */
sci_port_post_dummy_request(struct isci_port * iport)884*4882a593Smuzhiyun static void sci_port_post_dummy_request(struct isci_port *iport)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
887*4882a593Smuzhiyun 	u16 tag = iport->reserved_tag;
888*4882a593Smuzhiyun 	struct scu_task_context *tc;
889*4882a593Smuzhiyun 	u32 command;
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
892*4882a593Smuzhiyun 	tc->abort = 0;
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun 	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
895*4882a593Smuzhiyun 		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
896*4882a593Smuzhiyun 		  ISCI_TAG_TCI(tag);
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	sci_controller_post_request(ihost, command);
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun /**
902*4882a593Smuzhiyun  * This routine will abort the dummy request.  This will alow the hardware to
903*4882a593Smuzhiyun  * power down parts of the silicon to save power.
904*4882a593Smuzhiyun  *
905*4882a593Smuzhiyun  * @sci_port: The port on which the task must be aborted.
906*4882a593Smuzhiyun  *
907*4882a593Smuzhiyun  */
sci_port_abort_dummy_request(struct isci_port * iport)908*4882a593Smuzhiyun static void sci_port_abort_dummy_request(struct isci_port *iport)
909*4882a593Smuzhiyun {
910*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
911*4882a593Smuzhiyun 	u16 tag = iport->reserved_tag;
912*4882a593Smuzhiyun 	struct scu_task_context *tc;
913*4882a593Smuzhiyun 	u32 command;
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
916*4882a593Smuzhiyun 	tc->abort = 1;
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
919*4882a593Smuzhiyun 		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
920*4882a593Smuzhiyun 		  ISCI_TAG_TCI(tag);
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	sci_controller_post_request(ihost, command);
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun /**
926*4882a593Smuzhiyun  *
927*4882a593Smuzhiyun  * @sci_port: This is the struct isci_port object to resume.
928*4882a593Smuzhiyun  *
929*4882a593Smuzhiyun  * This method will resume the port task scheduler for this port object. none
930*4882a593Smuzhiyun  */
931*4882a593Smuzhiyun static void
sci_port_resume_port_task_scheduler(struct isci_port * iport)932*4882a593Smuzhiyun sci_port_resume_port_task_scheduler(struct isci_port *iport)
933*4882a593Smuzhiyun {
934*4882a593Smuzhiyun 	u32 pts_control_value;
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
937*4882a593Smuzhiyun 	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
938*4882a593Smuzhiyun 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
939*4882a593Smuzhiyun }
940*4882a593Smuzhiyun 
sci_port_ready_substate_waiting_enter(struct sci_base_state_machine * sm)941*4882a593Smuzhiyun static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
942*4882a593Smuzhiyun {
943*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 	sci_port_suspend_port_task_scheduler(iport);
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 	if (iport->active_phy_mask != 0) {
950*4882a593Smuzhiyun 		/* At least one of the phys on the port is ready */
951*4882a593Smuzhiyun 		port_state_machine_change(iport,
952*4882a593Smuzhiyun 					  SCI_PORT_SUB_OPERATIONAL);
953*4882a593Smuzhiyun 	}
954*4882a593Smuzhiyun }
955*4882a593Smuzhiyun 
scic_sds_port_ready_substate_waiting_exit(struct sci_base_state_machine * sm)956*4882a593Smuzhiyun static void scic_sds_port_ready_substate_waiting_exit(
957*4882a593Smuzhiyun 					struct sci_base_state_machine *sm)
958*4882a593Smuzhiyun {
959*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
960*4882a593Smuzhiyun 	sci_port_resume_port_task_scheduler(iport);
961*4882a593Smuzhiyun }
962*4882a593Smuzhiyun 
sci_port_ready_substate_operational_enter(struct sci_base_state_machine * sm)963*4882a593Smuzhiyun static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
964*4882a593Smuzhiyun {
965*4882a593Smuzhiyun 	u32 index;
966*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
967*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
970*4882a593Smuzhiyun 		__func__, iport->physical_port_index);
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	for (index = 0; index < SCI_MAX_PHYS; index++) {
973*4882a593Smuzhiyun 		if (iport->phy_table[index]) {
974*4882a593Smuzhiyun 			writel(iport->physical_port_index,
975*4882a593Smuzhiyun 				&iport->port_pe_configuration_register[
976*4882a593Smuzhiyun 					iport->phy_table[index]->phy_index]);
977*4882a593Smuzhiyun 			if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
978*4882a593Smuzhiyun 				sci_port_resume_phy(iport, iport->phy_table[index]);
979*4882a593Smuzhiyun 		}
980*4882a593Smuzhiyun 	}
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun 	sci_port_update_viit_entry(iport);
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	/*
985*4882a593Smuzhiyun 	 * Post the dummy task for the port so the hardware can schedule
986*4882a593Smuzhiyun 	 * io correctly
987*4882a593Smuzhiyun 	 */
988*4882a593Smuzhiyun 	sci_port_post_dummy_request(iport);
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun 
sci_port_invalidate_dummy_remote_node(struct isci_port * iport)991*4882a593Smuzhiyun static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
992*4882a593Smuzhiyun {
993*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
994*4882a593Smuzhiyun 	u8 phys_index = iport->physical_port_index;
995*4882a593Smuzhiyun 	union scu_remote_node_context *rnc;
996*4882a593Smuzhiyun 	u16 rni = iport->reserved_rni;
997*4882a593Smuzhiyun 	u32 command;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	rnc = &ihost->remote_node_context_table[rni];
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 	rnc->ssp.is_valid = false;
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	/* ensure the preceding tc abort request has reached the
1004*4882a593Smuzhiyun 	 * controller and give it ample time to act before posting the rnc
1005*4882a593Smuzhiyun 	 * invalidate
1006*4882a593Smuzhiyun 	 */
1007*4882a593Smuzhiyun 	readl(&ihost->smu_registers->interrupt_status); /* flush */
1008*4882a593Smuzhiyun 	udelay(10);
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
1011*4882a593Smuzhiyun 		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	sci_controller_post_request(ihost, command);
1014*4882a593Smuzhiyun }
1015*4882a593Smuzhiyun 
1016*4882a593Smuzhiyun /**
1017*4882a593Smuzhiyun  *
1018*4882a593Smuzhiyun  * @object: This is the object which is cast to a struct isci_port object.
1019*4882a593Smuzhiyun  *
1020*4882a593Smuzhiyun  * This method will perform the actions required by the struct isci_port on
1021*4882a593Smuzhiyun  * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
1022*4882a593Smuzhiyun  * the port not ready and suspends the port task scheduler. none
1023*4882a593Smuzhiyun  */
sci_port_ready_substate_operational_exit(struct sci_base_state_machine * sm)1024*4882a593Smuzhiyun static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
1025*4882a593Smuzhiyun {
1026*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1027*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 	/*
1030*4882a593Smuzhiyun 	 * Kill the dummy task for this port if it has not yet posted
1031*4882a593Smuzhiyun 	 * the hardware will treat this as a NOP and just return abort
1032*4882a593Smuzhiyun 	 * complete.
1033*4882a593Smuzhiyun 	 */
1034*4882a593Smuzhiyun 	sci_port_abort_dummy_request(iport);
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1037*4882a593Smuzhiyun 		__func__, iport->physical_port_index);
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	if (iport->ready_exit)
1040*4882a593Smuzhiyun 		sci_port_invalidate_dummy_remote_node(iport);
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun 
sci_port_ready_substate_configuring_enter(struct sci_base_state_machine * sm)1043*4882a593Smuzhiyun static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
1044*4882a593Smuzhiyun {
1045*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1046*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	if (iport->active_phy_mask == 0) {
1049*4882a593Smuzhiyun 		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1050*4882a593Smuzhiyun 			__func__, iport->physical_port_index);
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1053*4882a593Smuzhiyun 	} else
1054*4882a593Smuzhiyun 		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun 
sci_port_start(struct isci_port * iport)1057*4882a593Smuzhiyun enum sci_status sci_port_start(struct isci_port *iport)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
1060*4882a593Smuzhiyun 	enum sci_status status = SCI_SUCCESS;
1061*4882a593Smuzhiyun 	enum sci_port_states state;
1062*4882a593Smuzhiyun 	u32 phy_mask;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	state = iport->sm.current_state_id;
1065*4882a593Smuzhiyun 	if (state != SCI_PORT_STOPPED) {
1066*4882a593Smuzhiyun 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1067*4882a593Smuzhiyun 			 __func__, port_state_name(state));
1068*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_STATE;
1069*4882a593Smuzhiyun 	}
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	if (iport->assigned_device_count > 0) {
1072*4882a593Smuzhiyun 		/* TODO This is a start failure operation because
1073*4882a593Smuzhiyun 		 * there are still devices assigned to this port.
1074*4882a593Smuzhiyun 		 * There must be no devices assigned to a port on a
1075*4882a593Smuzhiyun 		 * start operation.
1076*4882a593Smuzhiyun 		 */
1077*4882a593Smuzhiyun 		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1078*4882a593Smuzhiyun 	}
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
1081*4882a593Smuzhiyun 		u16 rni = sci_remote_node_table_allocate_remote_node(
1082*4882a593Smuzhiyun 				&ihost->available_remote_nodes, 1);
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 		if (rni != SCU_DUMMY_INDEX)
1085*4882a593Smuzhiyun 			sci_port_construct_dummy_rnc(iport, rni);
1086*4882a593Smuzhiyun 		else
1087*4882a593Smuzhiyun 			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1088*4882a593Smuzhiyun 		iport->reserved_rni = rni;
1089*4882a593Smuzhiyun 	}
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1092*4882a593Smuzhiyun 		u16 tag;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 		tag = isci_alloc_tag(ihost);
1095*4882a593Smuzhiyun 		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
1096*4882a593Smuzhiyun 			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1097*4882a593Smuzhiyun 		else
1098*4882a593Smuzhiyun 			sci_port_construct_dummy_task(iport, tag);
1099*4882a593Smuzhiyun 		iport->reserved_tag = tag;
1100*4882a593Smuzhiyun 	}
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	if (status == SCI_SUCCESS) {
1103*4882a593Smuzhiyun 		phy_mask = sci_port_get_phys(iport);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 		/*
1106*4882a593Smuzhiyun 		 * There are one or more phys assigned to this port.  Make sure
1107*4882a593Smuzhiyun 		 * the port's phy mask is in fact legal and supported by the
1108*4882a593Smuzhiyun 		 * silicon.
1109*4882a593Smuzhiyun 		 */
1110*4882a593Smuzhiyun 		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
1111*4882a593Smuzhiyun 			port_state_machine_change(iport,
1112*4882a593Smuzhiyun 						  SCI_PORT_READY);
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 			return SCI_SUCCESS;
1115*4882a593Smuzhiyun 		}
1116*4882a593Smuzhiyun 		status = SCI_FAILURE;
1117*4882a593Smuzhiyun 	}
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	if (status != SCI_SUCCESS)
1120*4882a593Smuzhiyun 		sci_port_destroy_dummy_resources(iport);
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	return status;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
sci_port_stop(struct isci_port * iport)1125*4882a593Smuzhiyun enum sci_status sci_port_stop(struct isci_port *iport)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	enum sci_port_states state;
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 	state = iport->sm.current_state_id;
1130*4882a593Smuzhiyun 	switch (state) {
1131*4882a593Smuzhiyun 	case SCI_PORT_STOPPED:
1132*4882a593Smuzhiyun 		return SCI_SUCCESS;
1133*4882a593Smuzhiyun 	case SCI_PORT_SUB_WAITING:
1134*4882a593Smuzhiyun 	case SCI_PORT_SUB_OPERATIONAL:
1135*4882a593Smuzhiyun 	case SCI_PORT_SUB_CONFIGURING:
1136*4882a593Smuzhiyun 	case SCI_PORT_RESETTING:
1137*4882a593Smuzhiyun 		port_state_machine_change(iport,
1138*4882a593Smuzhiyun 					  SCI_PORT_STOPPING);
1139*4882a593Smuzhiyun 		return SCI_SUCCESS;
1140*4882a593Smuzhiyun 	default:
1141*4882a593Smuzhiyun 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1142*4882a593Smuzhiyun 			 __func__, port_state_name(state));
1143*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_STATE;
1144*4882a593Smuzhiyun 	}
1145*4882a593Smuzhiyun }
1146*4882a593Smuzhiyun 
/* Start a hard reset on the port.
 *
 * Only legal from the operational substate.  Picks the first phy that is
 * both assigned to the port and active, issues the phy reset on it, arms
 * the port timer with @timeout and moves the port to the resetting state.
 *
 * Returns SCI_FAILURE_INVALID_STATE when not operational,
 * SCI_FAILURE_INVALID_PHY when no active phy is available, the
 * sci_phy_reset() status on failure, otherwise SCI_SUCCESS.
 */
static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_port_states state = iport->sm.current_state_id;
	struct isci_phy *iphy = NULL;
	enum sci_status status;
	u32 i;

	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	/* The reset request has to go out on a phy that is actually up;
	 * take the first assigned phy that is active on this port.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *candidate = iport->phy_table[i];

		if (candidate && sci_port_active_phy(iport, candidate)) {
			iphy = candidate;
			break;
		}
	}

	if (!iphy)
		return SCI_FAILURE_INVALID_PHY;

	status = sci_phy_reset(iphy);
	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}
1187*4882a593Smuzhiyun 
/**
 * sci_port_add_phy() -
 * @iport: This parameter specifies the port in which the phy will be added.
 * @iphy: This parameter is the phy which is to be added to the port.
 *
 * This method will add a PHY to the selected port. This method returns an
 * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
 * status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				      struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	/* Re-enable broadcast change notifications on the port's phys. */
	sci_port_bcn_enable(iport);

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low  != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		/* Bring the new phy up and reconfigure the port around it. */
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
1258*4882a593Smuzhiyun 
/**
 * sci_port_remove_phy() -
 * @iport: This parameter specifies the port from which the phy will be
 *    removed.
 * @iphy: This parameter is the phy which is to be removed from the port.
 *
 * This method will remove the PHY from the selected PORT. This method returns
 * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
 * other status is a failure to remove the phy from the port.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
					 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		/* Port is idle; just unlink the phy. */
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		/* Take the phy down and reconfigure the port without it. */
		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
1308*4882a593Smuzhiyun 
sci_port_link_up(struct isci_port * iport,struct isci_phy * iphy)1309*4882a593Smuzhiyun enum sci_status sci_port_link_up(struct isci_port *iport,
1310*4882a593Smuzhiyun 				      struct isci_phy *iphy)
1311*4882a593Smuzhiyun {
1312*4882a593Smuzhiyun 	enum sci_port_states state;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	state = iport->sm.current_state_id;
1315*4882a593Smuzhiyun 	switch (state) {
1316*4882a593Smuzhiyun 	case SCI_PORT_SUB_WAITING:
1317*4882a593Smuzhiyun 		/* Since this is the first phy going link up for the port we
1318*4882a593Smuzhiyun 		 * can just enable it and continue
1319*4882a593Smuzhiyun 		 */
1320*4882a593Smuzhiyun 		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		port_state_machine_change(iport,
1323*4882a593Smuzhiyun 					  SCI_PORT_SUB_OPERATIONAL);
1324*4882a593Smuzhiyun 		return SCI_SUCCESS;
1325*4882a593Smuzhiyun 	case SCI_PORT_SUB_OPERATIONAL:
1326*4882a593Smuzhiyun 		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1327*4882a593Smuzhiyun 		return SCI_SUCCESS;
1328*4882a593Smuzhiyun 	case SCI_PORT_RESETTING:
1329*4882a593Smuzhiyun 		/* TODO We should  make  sure  that  the phy  that  has gone
1330*4882a593Smuzhiyun 		 * link up is the same one on which we sent the reset.  It is
1331*4882a593Smuzhiyun 		 * possible that the phy on which we sent  the reset is not the
1332*4882a593Smuzhiyun 		 * one that has  gone  link up  and we  want to make sure that
1333*4882a593Smuzhiyun 		 * phy being reset  comes  back.  Consider the case where a
1334*4882a593Smuzhiyun 		 * reset is sent but before the hardware processes the reset it
1335*4882a593Smuzhiyun 		 * get a link up on  the  port because of a hot plug event.
1336*4882a593Smuzhiyun 		 * because  of  the reset request this phy will go link down
1337*4882a593Smuzhiyun 		 * almost immediately.
1338*4882a593Smuzhiyun 		 */
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 		/* In the resetting state we don't notify the user regarding
1341*4882a593Smuzhiyun 		 * link up and link down notifications.
1342*4882a593Smuzhiyun 		 */
1343*4882a593Smuzhiyun 		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
1344*4882a593Smuzhiyun 		return SCI_SUCCESS;
1345*4882a593Smuzhiyun 	default:
1346*4882a593Smuzhiyun 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1347*4882a593Smuzhiyun 			 __func__, port_state_name(state));
1348*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_STATE;
1349*4882a593Smuzhiyun 	}
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun 
sci_port_link_down(struct isci_port * iport,struct isci_phy * iphy)1352*4882a593Smuzhiyun enum sci_status sci_port_link_down(struct isci_port *iport,
1353*4882a593Smuzhiyun 					struct isci_phy *iphy)
1354*4882a593Smuzhiyun {
1355*4882a593Smuzhiyun 	enum sci_port_states state;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	state = iport->sm.current_state_id;
1358*4882a593Smuzhiyun 	switch (state) {
1359*4882a593Smuzhiyun 	case SCI_PORT_SUB_OPERATIONAL:
1360*4882a593Smuzhiyun 		sci_port_deactivate_phy(iport, iphy, true);
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 		/* If there are no active phys left in the port, then
1363*4882a593Smuzhiyun 		 * transition the port to the WAITING state until such time
1364*4882a593Smuzhiyun 		 * as a phy goes link up
1365*4882a593Smuzhiyun 		 */
1366*4882a593Smuzhiyun 		if (iport->active_phy_mask == 0)
1367*4882a593Smuzhiyun 			port_state_machine_change(iport,
1368*4882a593Smuzhiyun 						  SCI_PORT_SUB_WAITING);
1369*4882a593Smuzhiyun 		return SCI_SUCCESS;
1370*4882a593Smuzhiyun 	case SCI_PORT_RESETTING:
1371*4882a593Smuzhiyun 		/* In the resetting state we don't notify the user regarding
1372*4882a593Smuzhiyun 		 * link up and link down notifications. */
1373*4882a593Smuzhiyun 		sci_port_deactivate_phy(iport, iphy, false);
1374*4882a593Smuzhiyun 		return SCI_SUCCESS;
1375*4882a593Smuzhiyun 	default:
1376*4882a593Smuzhiyun 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1377*4882a593Smuzhiyun 			 __func__, port_state_name(state));
1378*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_STATE;
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
sci_port_start_io(struct isci_port * iport,struct isci_remote_device * idev,struct isci_request * ireq)1382*4882a593Smuzhiyun enum sci_status sci_port_start_io(struct isci_port *iport,
1383*4882a593Smuzhiyun 				  struct isci_remote_device *idev,
1384*4882a593Smuzhiyun 				  struct isci_request *ireq)
1385*4882a593Smuzhiyun {
1386*4882a593Smuzhiyun 	enum sci_port_states state;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	state = iport->sm.current_state_id;
1389*4882a593Smuzhiyun 	switch (state) {
1390*4882a593Smuzhiyun 	case SCI_PORT_SUB_WAITING:
1391*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_STATE;
1392*4882a593Smuzhiyun 	case SCI_PORT_SUB_OPERATIONAL:
1393*4882a593Smuzhiyun 		iport->started_request_count++;
1394*4882a593Smuzhiyun 		return SCI_SUCCESS;
1395*4882a593Smuzhiyun 	default:
1396*4882a593Smuzhiyun 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1397*4882a593Smuzhiyun 			 __func__, port_state_name(state));
1398*4882a593Smuzhiyun 		return SCI_FAILURE_INVALID_STATE;
1399*4882a593Smuzhiyun 	}
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun 
/* Account for an I/O completing on @iport and drive any state
 * transitions that depend on the outstanding-request count reaching
 * zero (finish a stop, or leave the configuring substate).
 */
enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
		sci_port_decrement_request_count(iport);

		/* Last outstanding request done: the stop can complete. */
		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		/* Reconfiguration was waiting on in-flight I/O; once it
		 * drains the port can go operational.
		 */
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	/* NOTE(review): no default case — every sci_port_states value
	 * appears above; an out-of-range state would fall through and
	 * return SCI_SUCCESS without decrementing.  Confirm intentional.
	 */
	}
	return SCI_SUCCESS;
}
1438*4882a593Smuzhiyun 
sci_port_enable_port_task_scheduler(struct isci_port * iport)1439*4882a593Smuzhiyun static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun 	u32 pts_control_value;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	 /* enable the port task scheduler in a suspended state */
1444*4882a593Smuzhiyun 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1445*4882a593Smuzhiyun 	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
1446*4882a593Smuzhiyun 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun 
sci_port_disable_port_task_scheduler(struct isci_port * iport)1449*4882a593Smuzhiyun static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun 	u32 pts_control_value;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1454*4882a593Smuzhiyun 	pts_control_value &=
1455*4882a593Smuzhiyun 		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
1456*4882a593Smuzhiyun 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun 
/* Post the port's reserved dummy remote node context to the hardware,
 * then suspend it (TX/RX).  The ordering of post → flush/delay →
 * suspend is deliberate; see the inline comment.
 */
static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	/* Mark the reserved RNC table entry valid before posting it. */
	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* ensure hardware has seen the post rnc command and give it
	 * ample time to act before sending the suspend
	 */
	readl(&ihost->smu_registers->interrupt_status); /* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}
1486*4882a593Smuzhiyun 
sci_port_stopped_state_enter(struct sci_base_state_machine * sm)1487*4882a593Smuzhiyun static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
1492*4882a593Smuzhiyun 		/*
1493*4882a593Smuzhiyun 		 * If we enter this state becasuse of a request to stop
1494*4882a593Smuzhiyun 		 * the port then we want to disable the hardwares port
1495*4882a593Smuzhiyun 		 * task scheduler. */
1496*4882a593Smuzhiyun 		sci_port_disable_port_task_scheduler(iport);
1497*4882a593Smuzhiyun 	}
1498*4882a593Smuzhiyun }
1499*4882a593Smuzhiyun 
sci_port_stopped_state_exit(struct sci_base_state_machine * sm)1500*4882a593Smuzhiyun static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
1501*4882a593Smuzhiyun {
1502*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	/* Enable and suspend the port task scheduler */
1505*4882a593Smuzhiyun 	sci_port_enable_port_task_scheduler(iport);
1506*4882a593Smuzhiyun }
1507*4882a593Smuzhiyun 
sci_port_ready_state_enter(struct sci_base_state_machine * sm)1508*4882a593Smuzhiyun static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
1509*4882a593Smuzhiyun {
1510*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1511*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
1512*4882a593Smuzhiyun 	u32 prev_state;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	prev_state = iport->sm.previous_state_id;
1515*4882a593Smuzhiyun 	if (prev_state  == SCI_PORT_RESETTING)
1516*4882a593Smuzhiyun 		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1517*4882a593Smuzhiyun 	else
1518*4882a593Smuzhiyun 		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1519*4882a593Smuzhiyun 			__func__, iport->physical_port_index);
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	/* Post and suspend the dummy remote node context for this port. */
1522*4882a593Smuzhiyun 	sci_port_post_dummy_remote_node(iport);
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	/* Start the ready substate machine */
1525*4882a593Smuzhiyun 	port_state_machine_change(iport,
1526*4882a593Smuzhiyun 				  SCI_PORT_SUB_WAITING);
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun 
sci_port_resetting_state_exit(struct sci_base_state_machine * sm)1529*4882a593Smuzhiyun static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	sci_del_timer(&iport->timer);
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun 
sci_port_stopping_state_exit(struct sci_base_state_machine * sm)1536*4882a593Smuzhiyun static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
1537*4882a593Smuzhiyun {
1538*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	sci_del_timer(&iport->timer);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	sci_port_destroy_dummy_resources(iport);
1543*4882a593Smuzhiyun }
1544*4882a593Smuzhiyun 
sci_port_failed_state_enter(struct sci_base_state_machine * sm)1545*4882a593Smuzhiyun static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun 
/* Enable or disable link-layer hang detection on all active phys.
 *
 * A reference count of requesters is kept so that a zero @timeout only
 * clears the hardware setting once the last user has turned it off.
 */
void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
{
	u32 active = iport->active_phy_mask;
	int i;

	if (timeout)
		++iport->hang_detect_users;
	else if (iport->hang_detect_users > 1)
		--iport->hang_detect_users;
	else
		iport->hang_detect_users = 0;

	/* Skip the hardware write when disabling while other users still
	 * want hang detection on.
	 */
	if (!timeout && iport->hang_detect_users != 0)
		return;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		if (active & (1u << i)) {
			writel(timeout,
			       &iport->phy_table[i]
				  ->link_layer_registers
				  ->link_layer_hang_detection_timeout);
		}
	}
}
1575*4882a593Smuzhiyun /* --------------------------------------------------------------------------- */
1576*4882a593Smuzhiyun 
/* Port state machine dispatch table, indexed by enum sci_port_states.
 * A state with no handler for a given hook takes no action on that
 * transition.
 */
static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state  = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state  = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state  = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state  = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};
1606*4882a593Smuzhiyun 
/* One-time construction of an isci_port: initialize the state machine
 * in STOPPED, zero the phy/request bookkeeping, and clear the phy table.
 */
void sci_port_construct(struct isci_port *iport, u8 index,
			     struct isci_host *ihost)
{
	u8 i;

	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask     = 0;
	iport->enabled_phy_mask    = 0;
	iport->last_active_phy     = 0;
	iport->ready_exit	   = false;

	iport->owning_controller = ihost;

	iport->started_request_count = 0;
	iport->assigned_device_count = 0;
	iport->hang_detect_users = 0;

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		iport->phy_table[i] = NULL;
}
1635*4882a593Smuzhiyun 
sci_port_broadcast_change_received(struct isci_port * iport,struct isci_phy * iphy)1636*4882a593Smuzhiyun void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun 	struct isci_host *ihost = iport->owning_controller;
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	/* notify the user. */
1641*4882a593Smuzhiyun 	isci_port_bc_change_received(ihost, iport, iphy);
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun 
/* Block until any in-flight hard reset on @iport completes, i.e. until
 * IPORT_RESET_PENDING is cleared and the host event queue is woken.
 */
static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}
1648*4882a593Smuzhiyun 
/* Issue a hard reset on @iport and wait for it to finish.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, or TMF_RESP_FUNC_FAILED
 * when the reset could not be started or completed with an error
 * status.
 */
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	/* Start the reset under the controller lock; IPORT_RESET_PENDING
	 * is what wait_port_reset() blocks on until completion.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IPORT_RESET_PENDING, &iport->state);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_port_reset(ihost, iport);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;

			dev_err(&ihost->pdev->dev,
				"%s: iport = %p; hard reset failed (0x%x)\n",
				__func__, iport, iport->hard_reset_status);
		}
	} else {
		/* The reset never started, so nobody else will clear the
		 * pending flag: do it here and wake any waiters.
		 */
		clear_bit(IPORT_RESET_PENDING, &iport->state);
		wake_up(&ihost->eventq);
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call"
			" failed 0x%x\n",
			__func__, iport, status);

	}
	return ret;
}
1694*4882a593Smuzhiyun 
isci_ata_check_ready(struct domain_device * dev)1695*4882a593Smuzhiyun int isci_ata_check_ready(struct domain_device *dev)
1696*4882a593Smuzhiyun {
1697*4882a593Smuzhiyun 	struct isci_port *iport = dev->port->lldd_port;
1698*4882a593Smuzhiyun 	struct isci_host *ihost = dev_to_ihost(dev);
1699*4882a593Smuzhiyun 	struct isci_remote_device *idev;
1700*4882a593Smuzhiyun 	unsigned long flags;
1701*4882a593Smuzhiyun 	int rc = 0;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	spin_lock_irqsave(&ihost->scic_lock, flags);
1704*4882a593Smuzhiyun 	idev = isci_lookup_device(dev);
1705*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	if (!idev)
1708*4882a593Smuzhiyun 		goto out;
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	if (test_bit(IPORT_RESET_PENDING, &iport->state))
1711*4882a593Smuzhiyun 		goto out;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	rc = !!iport->active_phy_mask;
1714*4882a593Smuzhiyun  out:
1715*4882a593Smuzhiyun 	isci_put_device(idev);
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	return rc;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun 
isci_port_deformed(struct asd_sas_phy * phy)1720*4882a593Smuzhiyun void isci_port_deformed(struct asd_sas_phy *phy)
1721*4882a593Smuzhiyun {
1722*4882a593Smuzhiyun 	struct isci_host *ihost = phy->ha->lldd_ha;
1723*4882a593Smuzhiyun 	struct isci_port *iport = phy->port->lldd_port;
1724*4882a593Smuzhiyun 	unsigned long flags;
1725*4882a593Smuzhiyun 	int i;
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 	/* we got a port notification on a port that was subsequently
1728*4882a593Smuzhiyun 	 * torn down and libsas is just now catching up
1729*4882a593Smuzhiyun 	 */
1730*4882a593Smuzhiyun 	if (!iport)
1731*4882a593Smuzhiyun 		return;
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	spin_lock_irqsave(&ihost->scic_lock, flags);
1734*4882a593Smuzhiyun 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1735*4882a593Smuzhiyun 		if (iport->active_phy_mask & 1 << i)
1736*4882a593Smuzhiyun 			break;
1737*4882a593Smuzhiyun 	}
1738*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	if (i >= SCI_MAX_PHYS)
1741*4882a593Smuzhiyun 		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
1742*4882a593Smuzhiyun 			__func__, (long) (iport - &ihost->ports[0]));
1743*4882a593Smuzhiyun }
1744*4882a593Smuzhiyun 
isci_port_formed(struct asd_sas_phy * phy)1745*4882a593Smuzhiyun void isci_port_formed(struct asd_sas_phy *phy)
1746*4882a593Smuzhiyun {
1747*4882a593Smuzhiyun 	struct isci_host *ihost = phy->ha->lldd_ha;
1748*4882a593Smuzhiyun 	struct isci_phy *iphy = to_iphy(phy);
1749*4882a593Smuzhiyun 	struct asd_sas_port *port = phy->port;
1750*4882a593Smuzhiyun 	struct isci_port *iport = NULL;
1751*4882a593Smuzhiyun 	unsigned long flags;
1752*4882a593Smuzhiyun 	int i;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	/* initial ports are formed as the driver is still initializing,
1755*4882a593Smuzhiyun 	 * wait for that process to complete
1756*4882a593Smuzhiyun 	 */
1757*4882a593Smuzhiyun 	wait_for_start(ihost);
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	spin_lock_irqsave(&ihost->scic_lock, flags);
1760*4882a593Smuzhiyun 	for (i = 0; i < SCI_MAX_PORTS; i++) {
1761*4882a593Smuzhiyun 		iport = &ihost->ports[i];
1762*4882a593Smuzhiyun 		if (iport->active_phy_mask & 1 << iphy->phy_index)
1763*4882a593Smuzhiyun 			break;
1764*4882a593Smuzhiyun 	}
1765*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	if (i >= SCI_MAX_PORTS)
1768*4882a593Smuzhiyun 		iport = NULL;
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	port->lldd_port = iport;
1771*4882a593Smuzhiyun }
1772