xref: /OK3568_Linux_fs/kernel/drivers/scsi/esas2r/esas2r_disc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  *  linux/drivers/scsi/esas2r/esas2r_disc.c
3*4882a593Smuzhiyun  *      esas2r device discovery routines
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (c) 2001-2013 ATTO Technology, Inc.
6*4882a593Smuzhiyun  *  (mailto:linuxdrivers@attotech.com)
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9*4882a593Smuzhiyun /*
10*4882a593Smuzhiyun  *  This program is free software; you can redistribute it and/or modify
11*4882a593Smuzhiyun  *  it under the terms of the GNU General Public License as published by
12*4882a593Smuzhiyun  *  the Free Software Foundation; version 2 of the License.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  *  This program is distributed in the hope that it will be useful,
15*4882a593Smuzhiyun  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16*4882a593Smuzhiyun  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17*4882a593Smuzhiyun  *  GNU General Public License for more details.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *  NO WARRANTY
20*4882a593Smuzhiyun  *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21*4882a593Smuzhiyun  *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22*4882a593Smuzhiyun  *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23*4882a593Smuzhiyun  *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24*4882a593Smuzhiyun  *  solely responsible for determining the appropriateness of using and
25*4882a593Smuzhiyun  *  distributing the Program and assumes all risks associated with its
26*4882a593Smuzhiyun  *  exercise of rights under this Agreement, including but not limited to
27*4882a593Smuzhiyun  *  the risks and costs of program errors, damage to or loss of data,
28*4882a593Smuzhiyun  *  programs or equipment, and unavailability or interruption of operations.
29*4882a593Smuzhiyun  *
30*4882a593Smuzhiyun  *  DISCLAIMER OF LIABILITY
31*4882a593Smuzhiyun  *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32*4882a593Smuzhiyun  *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33*4882a593Smuzhiyun  *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34*4882a593Smuzhiyun  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35*4882a593Smuzhiyun  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36*4882a593Smuzhiyun  *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37*4882a593Smuzhiyun  *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38*4882a593Smuzhiyun  *
39*4882a593Smuzhiyun  *  You should have received a copy of the GNU General Public License
40*4882a593Smuzhiyun  *  along with this program; if not, write to the Free Software
41*4882a593Smuzhiyun  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42*4882a593Smuzhiyun  */
43*4882a593Smuzhiyun /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #include "esas2r.h"
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun /* Miscellaneous internal discovery routines */
48*4882a593Smuzhiyun static void esas2r_disc_abort(struct esas2r_adapter *a,
49*4882a593Smuzhiyun 			      struct esas2r_request *rq);
50*4882a593Smuzhiyun static bool esas2r_disc_continue(struct esas2r_adapter *a,
51*4882a593Smuzhiyun 				 struct esas2r_request *rq);
52*4882a593Smuzhiyun static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
53*4882a593Smuzhiyun static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
54*4882a593Smuzhiyun static bool esas2r_disc_start_request(struct esas2r_adapter *a,
55*4882a593Smuzhiyun 				      struct esas2r_request *rq);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun /* Internal discovery routines that process the states */
58*4882a593Smuzhiyun static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
59*4882a593Smuzhiyun 				       struct esas2r_request *rq);
60*4882a593Smuzhiyun static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
61*4882a593Smuzhiyun 					  struct esas2r_request *rq);
62*4882a593Smuzhiyun static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
63*4882a593Smuzhiyun 				struct esas2r_request *rq);
64*4882a593Smuzhiyun static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
65*4882a593Smuzhiyun 				   struct esas2r_request *rq);
66*4882a593Smuzhiyun static bool esas2r_disc_part_info(struct esas2r_adapter *a,
67*4882a593Smuzhiyun 				  struct esas2r_request *rq);
68*4882a593Smuzhiyun static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
69*4882a593Smuzhiyun 				     struct esas2r_request *rq);
70*4882a593Smuzhiyun static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
71*4882a593Smuzhiyun 					  struct esas2r_request *rq);
72*4882a593Smuzhiyun static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
73*4882a593Smuzhiyun 					     struct esas2r_request *rq);
74*4882a593Smuzhiyun static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
75*4882a593Smuzhiyun 					  struct esas2r_request *rq);
76*4882a593Smuzhiyun static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
77*4882a593Smuzhiyun 					     struct esas2r_request *rq);
78*4882a593Smuzhiyun static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
79*4882a593Smuzhiyun 				      struct esas2r_request *rq);
80*4882a593Smuzhiyun static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
81*4882a593Smuzhiyun 					 struct esas2r_request *rq);
82*4882a593Smuzhiyun 
esas2r_disc_initialize(struct esas2r_adapter * a)83*4882a593Smuzhiyun void esas2r_disc_initialize(struct esas2r_adapter *a)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	struct esas2r_sas_nvram *nvr = a->nvram;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	esas2r_trace_enter();
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	clear_bit(AF_DISC_IN_PROG, &a->flags);
90*4882a593Smuzhiyun 	clear_bit(AF2_DEV_SCAN, &a->flags2);
91*4882a593Smuzhiyun 	clear_bit(AF2_DEV_CNT_OK, &a->flags2);
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	a->disc_start_time = jiffies_to_msecs(jiffies);
94*4882a593Smuzhiyun 	a->disc_wait_time = nvr->dev_wait_time * 1000;
95*4882a593Smuzhiyun 	a->disc_wait_cnt = nvr->dev_wait_count;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
98*4882a593Smuzhiyun 		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	/*
101*4882a593Smuzhiyun 	 * If we are doing chip reset or power management processing, always
102*4882a593Smuzhiyun 	 * wait for devices.  use the NVRAM device count if it is greater than
103*4882a593Smuzhiyun 	 * previously discovered devices.
104*4882a593Smuzhiyun 	 */
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	esas2r_hdebug("starting discovery...");
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	a->general_req.interrupt_cx = NULL;
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
111*4882a593Smuzhiyun 	    test_bit(AF_POWER_MGT, &a->flags)) {
112*4882a593Smuzhiyun 		if (a->prev_dev_cnt == 0) {
113*4882a593Smuzhiyun 			/* Don't bother waiting if there is nothing to wait
114*4882a593Smuzhiyun 			 * for.
115*4882a593Smuzhiyun 			 */
116*4882a593Smuzhiyun 			a->disc_wait_time = 0;
117*4882a593Smuzhiyun 		} else {
118*4882a593Smuzhiyun 			/*
119*4882a593Smuzhiyun 			 * Set the device wait count to what was previously
120*4882a593Smuzhiyun 			 * found.  We don't care if the user only configured
121*4882a593Smuzhiyun 			 * a time because we know the exact count to wait for.
122*4882a593Smuzhiyun 			 * There is no need to honor the user's wishes to
123*4882a593Smuzhiyun 			 * always wait the full time.
124*4882a593Smuzhiyun 			 */
125*4882a593Smuzhiyun 			a->disc_wait_cnt = a->prev_dev_cnt;
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 			/*
128*4882a593Smuzhiyun 			 * bump the minimum wait time to 15 seconds since the
129*4882a593Smuzhiyun 			 * default is 3 (system boot or the boot driver usually
130*4882a593Smuzhiyun 			 * buys us more time).
131*4882a593Smuzhiyun 			 */
132*4882a593Smuzhiyun 			if (a->disc_wait_time < 15000)
133*4882a593Smuzhiyun 				a->disc_wait_time = 15000;
134*4882a593Smuzhiyun 		}
135*4882a593Smuzhiyun 	}
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
138*4882a593Smuzhiyun 	esas2r_trace("disc wait time: %d", a->disc_wait_time);
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	if (a->disc_wait_time == 0)
141*4882a593Smuzhiyun 		esas2r_disc_check_complete(a);
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	esas2r_trace_exit();
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun 
esas2r_disc_start_waiting(struct esas2r_adapter * a)146*4882a593Smuzhiyun void esas2r_disc_start_waiting(struct esas2r_adapter *a)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	unsigned long flags;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	spin_lock_irqsave(&a->mem_lock, flags);
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	if (a->disc_ctx.disc_evt)
153*4882a593Smuzhiyun 		esas2r_disc_start_port(a);
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	spin_unlock_irqrestore(&a->mem_lock, flags);
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
esas2r_disc_check_for_work(struct esas2r_adapter * a)158*4882a593Smuzhiyun void esas2r_disc_check_for_work(struct esas2r_adapter *a)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	struct esas2r_request *rq = &a->general_req;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	/* service any pending interrupts first */
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	esas2r_polled_interrupt(a);
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	/*
167*4882a593Smuzhiyun 	 * now, interrupt processing may have queued up a discovery event.  go
168*4882a593Smuzhiyun 	 * see if we have one to start.  we couldn't start it in the ISR since
169*4882a593Smuzhiyun 	 * polled discovery would cause a deadlock.
170*4882a593Smuzhiyun 	 */
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	esas2r_disc_start_waiting(a);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	if (rq->interrupt_cx == NULL)
175*4882a593Smuzhiyun 		return;
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	if (rq->req_stat == RS_STARTED
178*4882a593Smuzhiyun 	    && rq->timeout <= RQ_MAX_TIMEOUT) {
179*4882a593Smuzhiyun 		/* wait for the current discovery request to complete. */
180*4882a593Smuzhiyun 		esas2r_wait_request(a, rq);
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 		if (rq->req_stat == RS_TIMEOUT) {
183*4882a593Smuzhiyun 			esas2r_disc_abort(a, rq);
184*4882a593Smuzhiyun 			esas2r_local_reset_adapter(a);
185*4882a593Smuzhiyun 			return;
186*4882a593Smuzhiyun 		}
187*4882a593Smuzhiyun 	}
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	if (rq->req_stat == RS_PENDING
190*4882a593Smuzhiyun 	    || rq->req_stat == RS_STARTED)
191*4882a593Smuzhiyun 		return;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	esas2r_disc_continue(a, rq);
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun 
/*
 * Decide whether the device-wait phase of discovery is finished.  While the
 * configured wait time/count has not been satisfied, periodically queue
 * scan events; once satisfied, stop waiting and publish the accumulated
 * target state.  Takes mem_lock only around event queuing.
 */
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
	unsigned long flags;

	esas2r_trace_enter();

	/* check to see if we should be waiting for devices */
	if (a->disc_wait_time) {
		u32 currtime = jiffies_to_msecs(jiffies);
		u32 time = currtime - a->disc_start_time;

		/*
		 * Wait until the device wait time is exhausted or the device
		 * wait count is satisfied.
		 */
		if (time < a->disc_wait_time
		    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
			|| a->disc_wait_cnt == 0)) {
			/* After three seconds of waiting, schedule a scan. */
			if (time >= 3000
			    && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
				spin_lock_irqsave(&a->mem_lock, flags);
				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
				spin_unlock_irqrestore(&a->mem_lock, flags);
			}

			esas2r_trace_exit();
			return;
		}

		/*
		 * We are done waiting...we think.  Adjust the wait time to
		 * consume events after the count is met.
		 */
		if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
			a->disc_wait_time = time + 3000;

		/* If we haven't done a full scan yet, do it now. */
		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);
			esas2r_trace_exit();
			return;
		}

		/*
		 * Now, if there is still time left to consume events, continue
		 * waiting.
		 */
		if (time < a->disc_wait_time) {
			esas2r_trace_exit();
			return;
		}
	} else {
		/* No wait configured: just make sure a full scan was done. */
		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);
		}
	}

	/* We want to stop waiting for devices. */
	a->disc_wait_time = 0;

	if (test_bit(AF_DISC_POLLED, &a->flags) &&
	    test_bit(AF_DISC_IN_PROG, &a->flags)) {
		/*
		 * Polled discovery is still pending so continue the active
		 * discovery until it is done.  At that point, we will stop
		 * polled discovery and transition to interrupt driven
		 * discovery.
		 */
	} else {
		/*
		 * Done waiting for devices.  Note that we get here immediately
		 * after deferred waiting completes because that is interrupt
		 * driven; i.e. There is no transition.
		 */
		esas2r_disc_fix_curr_requests(a);
		clear_bit(AF_DISC_PENDING, &a->flags);

		/*
		 * We have deferred target state changes until now because we
		 * don't want to report any removals (due to the first arrival)
		 * until the device wait time expires.
		 */
		set_bit(AF_PORT_CHANGE, &a->flags);
	}

	esas2r_trace_exit();
}
288*4882a593Smuzhiyun 
/*
 * Record a discovery event and, when it is safe, start processing it
 * immediately.
 */
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
	struct esas2r_disc_context *dc = &a->disc_ctx;

	esas2r_trace_enter();
	esas2r_trace("disc_event: %d", disc_evt);

	/* Merge the new event into the pending event mask. */
	dc->disc_evt |= disc_evt;

	/*
	 * Don't start discovery during a pending chip reset or while polled
	 * discovery is active -- starting from the ISR in that state would
	 * deadlock.
	 */
	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_DISC_POLLED, &a->flags))
		esas2r_disc_start_port(a);

	esas2r_trace_exit();
}
310*4882a593Smuzhiyun 
/*
 * Begin processing of a queued discovery event.  Returns false when
 * discovery is already in progress, when polled discovery must hand over
 * to interrupt-driven discovery, or when no event is pending (discovery
 * complete); otherwise returns the result of esas2r_disc_continue().
 * NOTE(review): callers in this file hold mem_lock around this function --
 * confirm that holds for all call sites.
 */
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	struct esas2r_disc_context *dc = &a->disc_ctx;
	bool ret;

	esas2r_trace_enter();

	/* Only one discovery may run at a time. */
	if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
		esas2r_trace_exit();

		return false;
	}

	/* If there is a discovery waiting, process it. */
	if (dc->disc_evt) {
		if (test_bit(AF_DISC_POLLED, &a->flags)
		    && a->disc_wait_time == 0) {
			/*
			 * We are doing polled discovery, but we no longer want
			 * to wait for devices.  Stop polled discovery and
			 * transition to interrupt driven discovery.
			 */

			esas2r_trace_exit();

			return false;
		}
	} else {
		/* Discovery is complete. */

		esas2r_hdebug("disc done");

		set_bit(AF_PORT_CHANGE, &a->flags);

		esas2r_trace_exit();

		return false;
	}

	/* Handle the discovery context */
	esas2r_trace("disc_evt: %d", dc->disc_evt);
	set_bit(AF_DISC_IN_PROG, &a->flags);
	dc->flags = 0;

	if (test_bit(AF_DISC_POLLED, &a->flags))
		dc->flags |= DCF_POLLED;

	rq->interrupt_cx = dc;
	rq->req_stat = RS_SUCCESS;

	/* Decode the event code; a scan takes precedence over a change. */
	if (dc->disc_evt & DCDE_DEV_SCAN) {
		dc->disc_evt &= ~DCDE_DEV_SCAN;

		dc->flags |= DCF_DEV_SCAN;
		dc->state = DCS_BLOCK_DEV_SCAN;
	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
		dc->disc_evt &= ~DCDE_DEV_CHANGE;

		dc->flags |= DCF_DEV_CHANGE;
		dc->state = DCS_DEV_RMV;
	}

	/* Continue interrupt driven discovery; polled mode is driven by
	 * esas2r_disc_check_for_work() instead. */
	if (!test_bit(AF_DISC_POLLED, &a->flags))
		ret = esas2r_disc_continue(a, rq);
	else
		ret = true;

	esas2r_trace_exit();

	return ret;
}
385*4882a593Smuzhiyun 
esas2r_disc_continue(struct esas2r_adapter * a,struct esas2r_request * rq)386*4882a593Smuzhiyun static bool esas2r_disc_continue(struct esas2r_adapter *a,
387*4882a593Smuzhiyun 				 struct esas2r_request *rq)
388*4882a593Smuzhiyun {
389*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
390*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
391*4882a593Smuzhiyun 	bool rslt;
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun 	/* Device discovery/removal */
394*4882a593Smuzhiyun 	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
395*4882a593Smuzhiyun 		rslt = false;
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 		switch (dc->state) {
398*4882a593Smuzhiyun 		case DCS_DEV_RMV:
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 			rslt = esas2r_disc_dev_remove(a, rq);
401*4882a593Smuzhiyun 			break;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 		case DCS_DEV_ADD:
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 			rslt = esas2r_disc_dev_add(a, rq);
406*4882a593Smuzhiyun 			break;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 		case DCS_BLOCK_DEV_SCAN:
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 			rslt = esas2r_disc_block_dev_scan(a, rq);
411*4882a593Smuzhiyun 			break;
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 		case DCS_RAID_GRP_INFO:
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 			rslt = esas2r_disc_raid_grp_info(a, rq);
416*4882a593Smuzhiyun 			break;
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 		case DCS_PART_INFO:
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 			rslt = esas2r_disc_part_info(a, rq);
421*4882a593Smuzhiyun 			break;
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 		case DCS_PT_DEV_INFO:
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 			rslt = esas2r_disc_passthru_dev_info(a, rq);
426*4882a593Smuzhiyun 			break;
427*4882a593Smuzhiyun 		case DCS_PT_DEV_ADDR:
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 			rslt = esas2r_disc_passthru_dev_addr(a, rq);
430*4882a593Smuzhiyun 			break;
431*4882a593Smuzhiyun 		case DCS_DISC_DONE:
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
434*4882a593Smuzhiyun 			break;
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 		default:
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 			esas2r_bugon();
439*4882a593Smuzhiyun 			dc->state = DCS_DISC_DONE;
440*4882a593Smuzhiyun 			break;
441*4882a593Smuzhiyun 		}
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 		if (rslt)
444*4882a593Smuzhiyun 			return true;
445*4882a593Smuzhiyun 	}
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 	/* Discovery is done...for now. */
448*4882a593Smuzhiyun 	rq->interrupt_cx = NULL;
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	if (!test_bit(AF_DISC_PENDING, &a->flags))
451*4882a593Smuzhiyun 		esas2r_disc_fix_curr_requests(a);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	clear_bit(AF_DISC_IN_PROG, &a->flags);
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 	/* Start the next discovery. */
456*4882a593Smuzhiyun 	return esas2r_disc_start_port(a);
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun 
esas2r_disc_start_request(struct esas2r_adapter * a,struct esas2r_request * rq)459*4882a593Smuzhiyun static bool esas2r_disc_start_request(struct esas2r_adapter *a,
460*4882a593Smuzhiyun 				      struct esas2r_request *rq)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun 	unsigned long flags;
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	/* Set the timeout to a minimum value. */
465*4882a593Smuzhiyun 	if (rq->timeout < ESAS2R_DEFAULT_TMO)
466*4882a593Smuzhiyun 		rq->timeout = ESAS2R_DEFAULT_TMO;
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 	/*
469*4882a593Smuzhiyun 	 * Override the request type to distinguish discovery requests.  If we
470*4882a593Smuzhiyun 	 * end up deferring the request, esas2r_disc_local_start_request()
471*4882a593Smuzhiyun 	 * will be called to restart it.
472*4882a593Smuzhiyun 	 */
473*4882a593Smuzhiyun 	rq->req_type = RT_DISC_REQ;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	spin_lock_irqsave(&a->queue_lock, flags);
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
478*4882a593Smuzhiyun 	    !test_bit(AF_FLASHING, &a->flags))
479*4882a593Smuzhiyun 		esas2r_disc_local_start_request(a, rq);
480*4882a593Smuzhiyun 	else
481*4882a593Smuzhiyun 		list_add_tail(&rq->req_list, &a->defer_list);
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	spin_unlock_irqrestore(&a->queue_lock, flags);
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	return true;
486*4882a593Smuzhiyun }
487*4882a593Smuzhiyun 
esas2r_disc_local_start_request(struct esas2r_adapter * a,struct esas2r_request * rq)488*4882a593Smuzhiyun void esas2r_disc_local_start_request(struct esas2r_adapter *a,
489*4882a593Smuzhiyun 				     struct esas2r_request *rq)
490*4882a593Smuzhiyun {
491*4882a593Smuzhiyun 	esas2r_trace_enter();
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 	list_add_tail(&rq->req_list, &a->active_list);
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	esas2r_start_vda_request(a, rq);
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	esas2r_trace_exit();
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	return;
500*4882a593Smuzhiyun }
501*4882a593Smuzhiyun 
esas2r_disc_abort(struct esas2r_adapter * a,struct esas2r_request * rq)502*4882a593Smuzhiyun static void esas2r_disc_abort(struct esas2r_adapter *a,
503*4882a593Smuzhiyun 			      struct esas2r_request *rq)
504*4882a593Smuzhiyun {
505*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
506*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun 	esas2r_trace_enter();
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	/* abort the current discovery */
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	dc->state = DCS_DISC_DONE;
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	esas2r_trace_exit();
515*4882a593Smuzhiyun }
516*4882a593Smuzhiyun 
esas2r_disc_block_dev_scan(struct esas2r_adapter * a,struct esas2r_request * rq)517*4882a593Smuzhiyun static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
518*4882a593Smuzhiyun 				       struct esas2r_request *rq)
519*4882a593Smuzhiyun {
520*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
521*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
522*4882a593Smuzhiyun 	bool rslt;
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	esas2r_trace_enter();
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	esas2r_rq_init_request(rq, a);
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	esas2r_build_mgt_req(a,
529*4882a593Smuzhiyun 			     rq,
530*4882a593Smuzhiyun 			     VDAMGT_DEV_SCAN,
531*4882a593Smuzhiyun 			     0,
532*4882a593Smuzhiyun 			     0,
533*4882a593Smuzhiyun 			     0,
534*4882a593Smuzhiyun 			     NULL);
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	rq->comp_cb = esas2r_disc_block_dev_scan_cb;
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	rq->timeout = 30000;
539*4882a593Smuzhiyun 	rq->interrupt_cx = dc;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	rslt = esas2r_disc_start_request(a, rq);
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	esas2r_trace_exit();
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	return rslt;
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun 
esas2r_disc_block_dev_scan_cb(struct esas2r_adapter * a,struct esas2r_request * rq)548*4882a593Smuzhiyun static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
549*4882a593Smuzhiyun 					  struct esas2r_request *rq)
550*4882a593Smuzhiyun {
551*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
552*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
553*4882a593Smuzhiyun 	unsigned long flags;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	esas2r_trace_enter();
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	spin_lock_irqsave(&a->mem_lock, flags);
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	if (rq->req_stat == RS_SUCCESS)
560*4882a593Smuzhiyun 		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	dc->state = DCS_RAID_GRP_INFO;
563*4882a593Smuzhiyun 	dc->raid_grp_ix = 0;
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun 	esas2r_rq_destroy_request(rq, a);
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	/* continue discovery if it's interrupt driven */
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	if (!(dc->flags & DCF_POLLED))
570*4882a593Smuzhiyun 		esas2r_disc_continue(a, rq);
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	spin_unlock_irqrestore(&a->mem_lock, flags);
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	esas2r_trace_exit();
575*4882a593Smuzhiyun }
576*4882a593Smuzhiyun 
esas2r_disc_raid_grp_info(struct esas2r_adapter * a,struct esas2r_request * rq)577*4882a593Smuzhiyun static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
578*4882a593Smuzhiyun 				      struct esas2r_request *rq)
579*4882a593Smuzhiyun {
580*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
581*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
582*4882a593Smuzhiyun 	bool rslt;
583*4882a593Smuzhiyun 	struct atto_vda_grp_info *grpinfo;
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	esas2r_trace_enter();
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
588*4882a593Smuzhiyun 
589*4882a593Smuzhiyun 	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
590*4882a593Smuzhiyun 		dc->state = DCS_DISC_DONE;
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 		esas2r_trace_exit();
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 		return false;
595*4882a593Smuzhiyun 	}
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 	esas2r_rq_init_request(rq, a);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	esas2r_build_mgt_req(a,
604*4882a593Smuzhiyun 			     rq,
605*4882a593Smuzhiyun 			     VDAMGT_GRP_INFO,
606*4882a593Smuzhiyun 			     dc->scan_gen,
607*4882a593Smuzhiyun 			     0,
608*4882a593Smuzhiyun 			     sizeof(struct atto_vda_grp_info),
609*4882a593Smuzhiyun 			     NULL);
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	grpinfo->grp_index = dc->raid_grp_ix;
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 	rq->comp_cb = esas2r_disc_raid_grp_info_cb;
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	rq->interrupt_cx = dc;
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	rslt = esas2r_disc_start_request(a, rq);
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 	esas2r_trace_exit();
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun 	return rslt;
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun 
/*
 * Completion callback for the VDAMGT_GRP_INFO request issued by
 * esas2r_disc_raid_grp_info().  Holds a->mem_lock while it updates the
 * discovery context and (for interrupt-driven discovery) kicks off the
 * next discovery step.
 */
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	/* firmware's scan generation changed - restart the group scan
	 * from index 0 with the new generation. */
	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		goto done;
	}

	if (rq->req_stat == RS_SUCCESS) {
		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

		if (grpinfo->status != VDA_GRP_STAT_ONLINE
		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
			/* group not usable - go to the next group. */

			dc->raid_grp_ix++;
		} else {
			/* usable group: latch its name and geometry, then
			 * move on to enumerating its partitions. */
			memcpy(&dc->raid_grp_name[0],
			       &grpinfo->grp_name[0],
			       sizeof(grpinfo->grp_name));

			dc->interleave = le32_to_cpu(grpinfo->interleave);
			dc->block_size = le32_to_cpu(grpinfo->block_size);

			dc->state = DCS_PART_INFO;
			dc->part_num = 0;
		}
	} else {
		/* RS_GRP_INVALID simply means we ran past the last group;
		 * anything else is worth logging. */
		if (!(rq->req_stat == RS_GRP_INVALID)) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group info failed - "
				   "returned with %x",
				   rq->req_stat);
		}

		/* no more RAID groups - scan pass-through devices next. */
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	}

done:

	/* return rq to the free pool; esas2r_disc_continue() below
	 * allocates its own request as needed. */
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
686*4882a593Smuzhiyun 
esas2r_disc_part_info(struct esas2r_adapter * a,struct esas2r_request * rq)687*4882a593Smuzhiyun static bool esas2r_disc_part_info(struct esas2r_adapter *a,
688*4882a593Smuzhiyun 				  struct esas2r_request *rq)
689*4882a593Smuzhiyun {
690*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
691*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
692*4882a593Smuzhiyun 	bool rslt;
693*4882a593Smuzhiyun 	struct atto_vdapart_info *partinfo;
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	esas2r_trace_enter();
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	esas2r_trace("part_num: %d", dc->part_num);
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	if (dc->part_num >= VDA_MAX_PARTITIONS) {
700*4882a593Smuzhiyun 		dc->state = DCS_RAID_GRP_INFO;
701*4882a593Smuzhiyun 		dc->raid_grp_ix++;
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 		esas2r_trace_exit();
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 		return false;
706*4882a593Smuzhiyun 	}
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	esas2r_rq_init_request(rq, a);
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 	memset(partinfo, 0, sizeof(struct atto_vdapart_info));
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	esas2r_build_mgt_req(a,
715*4882a593Smuzhiyun 			     rq,
716*4882a593Smuzhiyun 			     VDAMGT_PART_INFO,
717*4882a593Smuzhiyun 			     dc->scan_gen,
718*4882a593Smuzhiyun 			     0,
719*4882a593Smuzhiyun 			     sizeof(struct atto_vdapart_info),
720*4882a593Smuzhiyun 			     NULL);
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	partinfo->part_no = dc->part_num;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	memcpy(&partinfo->grp_name[0],
725*4882a593Smuzhiyun 	       &dc->raid_grp_name[0],
726*4882a593Smuzhiyun 	       sizeof(partinfo->grp_name));
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	rq->comp_cb = esas2r_disc_part_info_cb;
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	rq->interrupt_cx = dc;
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun 	rslt = esas2r_disc_start_request(a, rq);
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	esas2r_trace_exit();
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 	return rslt;
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun 
/*
 * Completion callback for the VDAMGT_PART_INFO request issued by
 * esas2r_disc_part_info().  Adds each discovered partition to the
 * target database as a RAID target.  Runs under a->mem_lock.
 */
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		/* firmware's scan generation changed - restart the whole
		 * RAID group scan with the new generation. */
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		dc->state = DCS_RAID_GRP_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

		/* firmware may have returned a later partition than the one
		 * requested; resync our index to what it actually found. */
		dc->part_num = partinfo->part_no;

		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

		esas2r_targ_db_add_raid(a, dc);

		/* query the next partition of this group next time. */
		dc->part_num++;
	} else {
		/* RS_PART_LAST simply marks the end of the partition list;
		 * anything else is worth logging. */
		if (!(rq->req_stat == RS_PART_LAST)) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group partition info "
				   "failed - status:%d", rq->req_stat);
		}

		/* done with this group - advance to the next one. */
		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
787*4882a593Smuzhiyun 
esas2r_disc_passthru_dev_info(struct esas2r_adapter * a,struct esas2r_request * rq)788*4882a593Smuzhiyun static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
789*4882a593Smuzhiyun 					  struct esas2r_request *rq)
790*4882a593Smuzhiyun {
791*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
792*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
793*4882a593Smuzhiyun 	bool rslt;
794*4882a593Smuzhiyun 	struct atto_vda_devinfo *devinfo;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	esas2r_trace_enter();
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	esas2r_trace("dev_ix: %d", dc->dev_ix);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 	esas2r_rq_init_request(rq, a);
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun 	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 	esas2r_build_mgt_req(a,
807*4882a593Smuzhiyun 			     rq,
808*4882a593Smuzhiyun 			     VDAMGT_DEV_PT_INFO,
809*4882a593Smuzhiyun 			     dc->scan_gen,
810*4882a593Smuzhiyun 			     dc->dev_ix,
811*4882a593Smuzhiyun 			     sizeof(struct atto_vda_devinfo),
812*4882a593Smuzhiyun 			     NULL);
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	rq->interrupt_cx = dc;
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun 	rslt = esas2r_disc_start_request(a, rq);
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 	esas2r_trace_exit();
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	return rslt;
823*4882a593Smuzhiyun }
824*4882a593Smuzhiyun 
/*
 * Completion callback for the VDAMGT_DEV_PT_INFO request issued by
 * esas2r_disc_passthru_dev_info().  For devices that report a physical
 * ID, discovery proceeds to the address-lookup state; otherwise the
 * device is skipped.  Runs under a->mem_lock.
 */
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		/* firmware's scan generation changed - restart the
		 * pass-through device scan from index 0. */
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

		/* resync our index to the device the firmware reported. */
		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
			/* device exposes a physical ID - go fetch its SAS
			 * address before adding it to the target DB. */
			dc->curr_phys_id =
				le16_to_cpu(devinfo->phys_target_id);
			dc->dev_addr_type = ATTO_GDA_AT_PORT;
			dc->state = DCS_PT_DEV_ADDR;

			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		} else {
			/* no physical ID - skip to the next device. */
			dc->dev_ix++;
		}
	} else {
		/* RS_DEV_INVALID simply marks the end of the device list;
		 * anything else is worth logging. */
		if (!(rq->req_stat == RS_DEV_INVALID)) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for device information failed - "
				   "status:%d", rq->req_stat);
		}

		dc->state = DCS_DISC_DONE;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
880*4882a593Smuzhiyun 
/*
 * Issue a tunneled ATTO_FUNC_GET_DEV_ADDR HBA IOCTL to retrieve the
 * address (type dc->dev_addr_type) of the physical device identified
 * by dc->curr_phys_id.  The IOCTL payload lives in the adapter's
 * uncached discovery buffer; the SG list is built through
 * esas2r_disc_get_phys_addr().  Returns whether the request was
 * started (false on SG-list build failure, after freeing rq).
 */
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_ioctl *hi;
	struct esas2r_sg_context sgc;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	/* format the request. */

	sgc.cur_offset = NULL;
	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
	/* transfer covers the IOCTL header up to 'data' plus the
	 * get-device-address payload. */
	sgc.length = offsetof(struct atto_ioctl, data)
		     + sizeof(struct atto_hba_get_device_address);

	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, &sgc)) {
		/* SG list build failed - release the request and bail. */
		esas2r_rq_destroy_request(rq, a);

		esas2r_trace_exit();

		return false;
	}

	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

	rq->interrupt_cx = dc;

	/* format the IOCTL data. */

	hi = (struct atto_ioctl *)a->disc_buffer;

	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

	hi->version = ATTO_VER_GET_DEV_ADDR0;
	hi->function = ATTO_FUNC_GET_DEV_ADDR;
	hi->flags = HBAF_TUNNEL;

	/* NOTE(review): le32_to_cpu on a CPU-order value stored into the
	 * request looks direction-swapped (cpu_to_le32 expected) - the
	 * swap itself is symmetric, but confirm field endianness. */
	hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

	/* start it up. */

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}
938*4882a593Smuzhiyun 
/*
 * Completion callback for the GET_DEV_ADDR IOCTL issued by
 * esas2r_disc_passthru_dev_addr().  The lookup is two-phase: first the
 * port (SAS) address is fetched, then the same device is queried again
 * for its unique identifier, which is used to add the pass-through
 * target.  Runs under a->mem_lock.
 */
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = NULL;
	unsigned long flags;
	struct atto_ioctl *hi;
	u16 addrlen;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	/* the IOCTL response was DMA'd into the discovery buffer. */
	hi = (struct atto_ioctl *)a->disc_buffer;

	if (rq->req_stat == RS_SUCCESS
	    && hi->status == ATTO_STS_SUCCESS) {
		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
			/* phase 1: capture the SAS (port) address. */
			if (addrlen == sizeof(u64))
				memcpy(&dc->sas_addr,
				       &hi->data.get_dev_addr.address[0],
				       addrlen);
			else
				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

			/* Get the unique identifier. */
			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

			/* re-query the SAME device - skip the device-advance
			 * logic below. */
			goto next_dev_addr;
		} else {
			/* phase 2: add the pass through target. */
			if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(a,
							     dc,
							     &hi->data.
							     get_dev_addr.
							     address[0],
							     (u8)hi->data.
							     get_dev_addr.
							     addr_len);

				if (t)
					memcpy(&t->sas_addr, &dc->sas_addr,
					       sizeof(t->sas_addr));
			} else {
				/* getting the back end data failed */

				esas2r_log(ESAS2R_LOG_WARN,
					   "an error occurred retrieving the "
					   "back end data (%s:%d)",
					   __func__,
					   __LINE__);
			}
		}
	} else {
		/* getting the back end data failed */

		esas2r_log(ESAS2R_LOG_WARN,
			   "an error occurred retrieving the back end data - "
			   "rq->req_stat:%d hi->status:%d",
			   rq->req_stat, hi->status);
	}

	/* proceed to the next device. */

	if (dc->flags & DCF_DEV_SCAN) {
		/* full scan: next pass-through device index. */
		dc->dev_ix++;
		dc->state = DCS_PT_DEV_INFO;
	} else if (dc->flags & DCF_DEV_CHANGE) {
		/* event-driven change: next target DB entry. */
		dc->curr_targ++;
		dc->state = DCS_DEV_ADD;
	} else {
		esas2r_bugon();
	}

next_dev_addr:
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
1029*4882a593Smuzhiyun 
esas2r_disc_get_phys_addr(struct esas2r_sg_context * sgc,u64 * addr)1030*4882a593Smuzhiyun static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
1031*4882a593Smuzhiyun {
1032*4882a593Smuzhiyun 	struct esas2r_adapter *a = sgc->adapter;
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	if (sgc->length > ESAS2R_DISC_BUF_LEN)
1035*4882a593Smuzhiyun 		esas2r_bugon();
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	*addr = a->uncached_phys
1038*4882a593Smuzhiyun 		+ (u64)((u8 *)a->disc_buffer - a->uncached);
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	return sgc->length;
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun 
esas2r_disc_dev_remove(struct esas2r_adapter * a,struct esas2r_request * rq)1043*4882a593Smuzhiyun static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
1044*4882a593Smuzhiyun 				   struct esas2r_request *rq)
1045*4882a593Smuzhiyun {
1046*4882a593Smuzhiyun 	struct esas2r_disc_context *dc =
1047*4882a593Smuzhiyun 		(struct esas2r_disc_context *)rq->interrupt_cx;
1048*4882a593Smuzhiyun 	struct esas2r_target *t;
1049*4882a593Smuzhiyun 	struct esas2r_target *t2;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	esas2r_trace_enter();
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	/* process removals. */
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	for (t = a->targetdb; t < a->targetdb_end; t++) {
1056*4882a593Smuzhiyun 		if (t->new_target_state != TS_NOT_PRESENT)
1057*4882a593Smuzhiyun 			continue;
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 		t->new_target_state = TS_INVALID;
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 		/* remove the right target! */
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 		t2 =
1064*4882a593Smuzhiyun 			esas2r_targ_db_find_by_virt_id(a,
1065*4882a593Smuzhiyun 						       esas2r_targ_get_id(t,
1066*4882a593Smuzhiyun 									  a));
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 		if (t2)
1069*4882a593Smuzhiyun 			esas2r_targ_db_remove(a, t2);
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	/* removals complete.  process arrivals. */
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	dc->state = DCS_DEV_ADD;
1075*4882a593Smuzhiyun 	dc->curr_targ = a->targetdb;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	esas2r_trace_exit();
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 	return false;
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun 
/*
 * Process one pending device arrival (DCS_DEV_ADD state).  Examines the
 * target at dc->curr_targ: if it is newly present, its saved LU event
 * determines whether it is added as a RAID target immediately or sent
 * through the pass-through address-lookup path (DCS_PT_DEV_ADDR).
 * Advances to the next target, or to DCS_DISC_DONE at the end of the
 * target DB.  Always returns false (no request started here).
 */
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = dc->curr_targ;

	if (t >= a->targetdb_end) {
		/* done processing state changes. */

		dc->state = DCS_DISC_DONE;
	} else if (t->new_target_state == TS_PRESENT) {
		struct atto_vda_ae_lu *luevt = &t->lu_event;

		esas2r_trace_enter();

		/* clear this now in case more events come in. */

		t->new_target_state = TS_INVALID;

		/* setup the discovery context for adding this device. */

		dc->curr_virt_id = esas2r_targ_get_id(t, a);

		/* RAID LU events that carry the tgt/lun payload include the
		 * volume geometry; otherwise fall back to zeros. */
		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
		} else {
			dc->block_size = 0;
			dc->interleave = 0;
		}

		/* determine the device type being added. */

		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
				/* pass-through device: fetch its address
				 * before adding it (leaves DCS_DEV_ADD). */
				dc->state = DCS_PT_DEV_ADDR;
				dc->dev_addr_type = ATTO_GDA_AT_PORT;
				dc->curr_phys_id = luevt->wphys_target_id;
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "luevt->dwevent does not have the "
					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
					   __func__, __LINE__);
			}
		} else {
			/* RAID volume: add it directly (no group name). */
			dc->raid_grp_name[0] = 0;

			esas2r_targ_db_add_raid(a, dc);
		}

		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		esas2r_trace("dwevent: %d", luevt->dwevent);

		esas2r_trace_exit();
	}

	/* still in DCS_DEV_ADD means this target needed no address lookup */
	if (dc->state == DCS_DEV_ADD) {
		/* go to the next device. */

		dc->curr_targ++;
	}

	return false;
}
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun /*
1152*4882a593Smuzhiyun  * When discovery is done, find all requests on defer queue and
1153*4882a593Smuzhiyun  * test if they need to be modified. If a target is no longer present
1154*4882a593Smuzhiyun  * then complete the request with RS_SEL. Otherwise, update the
1155*4882a593Smuzhiyun  * target_id since after a hibernate it can be a different value.
1156*4882a593Smuzhiyun  * VDA does not make passthrough target IDs persistent.
1157*4882a593Smuzhiyun  */
esas2r_disc_fix_curr_requests(struct esas2r_adapter * a)1158*4882a593Smuzhiyun static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	unsigned long flags;
1161*4882a593Smuzhiyun 	struct esas2r_target *t;
1162*4882a593Smuzhiyun 	struct esas2r_request *rq;
1163*4882a593Smuzhiyun 	struct list_head *element;
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* update virt_targ_id in any outstanding esas2r_requests  */
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	spin_lock_irqsave(&a->queue_lock, flags);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	list_for_each(element, &a->defer_list) {
1170*4882a593Smuzhiyun 		rq = list_entry(element, struct esas2r_request, req_list);
1171*4882a593Smuzhiyun 		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
1172*4882a593Smuzhiyun 			t = a->targetdb + rq->target_id;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 			if (t->target_state == TS_PRESENT)
1175*4882a593Smuzhiyun 				rq->vrq->scsi.target_id = le16_to_cpu(
1176*4882a593Smuzhiyun 					t->virt_targ_id);
1177*4882a593Smuzhiyun 			else
1178*4882a593Smuzhiyun 				rq->req_stat = RS_SEL;
1179*4882a593Smuzhiyun 		}
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	}
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	spin_unlock_irqrestore(&a->queue_lock, flags);
1184*4882a593Smuzhiyun }
1185