xref: /OK3568_Linux_fs/kernel/drivers/scsi/aic7xxx/aic79xx.seq (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun/*
2*4882a593Smuzhiyun * Adaptec U320 device driver firmware for Linux and FreeBSD.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (c) 1994-2001, 2004 Justin T. Gibbs.
5*4882a593Smuzhiyun * Copyright (c) 2000-2002 Adaptec Inc.
6*4882a593Smuzhiyun * All rights reserved.
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or without
9*4882a593Smuzhiyun * modification, are permitted provided that the following conditions
10*4882a593Smuzhiyun * are met:
11*4882a593Smuzhiyun * 1. Redistributions of source code must retain the above copyright
12*4882a593Smuzhiyun *    notice, this list of conditions, and the following disclaimer,
13*4882a593Smuzhiyun *    without modification.
14*4882a593Smuzhiyun * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15*4882a593Smuzhiyun *    substantially similar to the "NO WARRANTY" disclaimer below
16*4882a593Smuzhiyun *    ("Disclaimer") and any redistribution must be conditioned upon
17*4882a593Smuzhiyun *    including a substantially similar Disclaimer requirement for further
18*4882a593Smuzhiyun *    binary redistribution.
19*4882a593Smuzhiyun * 3. Neither the names of the above-listed copyright holders nor the names
20*4882a593Smuzhiyun *    of any contributors may be used to endorse or promote products derived
21*4882a593Smuzhiyun *    from this software without specific prior written permission.
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Alternatively, this software may be distributed under the terms of the
24*4882a593Smuzhiyun * GNU General Public License ("GPL") version 2 as published by the Free
25*4882a593Smuzhiyun * Software Foundation.
26*4882a593Smuzhiyun *
27*4882a593Smuzhiyun * NO WARRANTY
28*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29*4882a593Smuzhiyun * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30*4882a593Smuzhiyun * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31*4882a593Smuzhiyun * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32*4882a593Smuzhiyun * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33*4882a593Smuzhiyun * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34*4882a593Smuzhiyun * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35*4882a593Smuzhiyun * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36*4882a593Smuzhiyun * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37*4882a593Smuzhiyun * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38*4882a593Smuzhiyun * POSSIBILITY OF SUCH DAMAGES.
39*4882a593Smuzhiyun *
40*4882a593Smuzhiyun * $FreeBSD$
41*4882a593Smuzhiyun */
42*4882a593Smuzhiyun
43*4882a593SmuzhiyunVERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $"
44*4882a593SmuzhiyunPATCH_ARG_LIST = "struct ahd_softc *ahd"
45*4882a593SmuzhiyunPREFIX = "ahd_"
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun#include "aic79xx.reg"
48*4882a593Smuzhiyun#include "scsi_message.h"
49*4882a593Smuzhiyun
/*
 * Sequencer entry point after a firmware (re)start.  On chips with the
 * interrupt-collision bug, a stale SEQINTCODE may still be latched;
 * clear it with NO_SEQINT before falling into the idle loop.
 */
50*4882a593Smuzhiyunrestart:
51*4882a593Smuzhiyunif ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
52*4882a593Smuzhiyun	test	SEQINTCODE, 0xFF jz idle_loop;
53*4882a593Smuzhiyun	SET_SEQINTCODE(NO_SEQINT)
54*4882a593Smuzhiyun}
55*4882a593Smuzhiyun
/*
 * Main idle loop.  Each pass: thaw the select-out queue once the kernel
 * has caught up with our freeze count, migrate completions stalled on a
 * qfreeze to the complete_scb list, start a selection if SCBs are
 * waiting, and then service the GSFIFO, the data FIFOs, and the command
 * channel in turn.
 */
56*4882a593Smuzhiyunidle_loop:
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
59*4882a593Smuzhiyun		/*
60*4882a593Smuzhiyun		 * Convert ERROR status into a sequencer
61*4882a593Smuzhiyun		 * interrupt to handle the case of an
62*4882a593Smuzhiyun		 * interrupt collision on the hardware
63*4882a593Smuzhiyun		 * setting of HWERR.
64*4882a593Smuzhiyun		 */
65*4882a593Smuzhiyun		test	ERROR, 0xFF jz no_error_set;
66*4882a593Smuzhiyun		SET_SEQINTCODE(SAW_HWERR)
67*4882a593Smuzhiyunno_error_set:
68*4882a593Smuzhiyun	}
69*4882a593Smuzhiyun	SET_MODE(M_SCSI, M_SCSI)
70*4882a593Smuzhiyun	test	SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus;
71*4882a593Smuzhiyun	test	SEQ_FLAGS2, SELECTOUT_QFROZEN jz check_waiting_list;
72*4882a593Smuzhiyun	/*
73*4882a593Smuzhiyun	 * If the kernel has caught up with us, thaw the queue.
74*4882a593Smuzhiyun	 */
75*4882a593Smuzhiyun	mov	A, KERNEL_QFREEZE_COUNT;
76*4882a593Smuzhiyun	cmp	QFREEZE_COUNT, A jne check_frozen_completions;
77*4882a593Smuzhiyun	mov	A, KERNEL_QFREEZE_COUNT[1];
78*4882a593Smuzhiyun	cmp	QFREEZE_COUNT[1], A jne check_frozen_completions;
79*4882a593Smuzhiyun	and	SEQ_FLAGS2, ~SELECTOUT_QFROZEN;
80*4882a593Smuzhiyun	jmp	check_waiting_list;
81*4882a593Smuzhiyuncheck_frozen_completions:
82*4882a593Smuzhiyun	test	SSTAT0, SELDO|SELINGO jnz idle_loop_checkbus;
83*4882a593SmuzhiyunBEGIN_CRITICAL;
84*4882a593Smuzhiyun	/*
85*4882a593Smuzhiyun	 * If we have completions stalled waiting for the qfreeze
86*4882a593Smuzhiyun	 * to take effect, move them over to the complete_scb list
87*4882a593Smuzhiyun	 * now that no selections are pending.
88*4882a593Smuzhiyun	 */
89*4882a593Smuzhiyun	cmp	COMPLETE_ON_QFREEZE_HEAD[1],SCB_LIST_NULL je idle_loop_checkbus;
90*4882a593Smuzhiyun	/*
91*4882a593Smuzhiyun	 * Find the end of the qfreeze list.  The first element has
92*4882a593Smuzhiyun	 * to be treated specially.
93*4882a593Smuzhiyun	 */
94*4882a593Smuzhiyun	bmov	SCBPTR, COMPLETE_ON_QFREEZE_HEAD, 2;
95*4882a593Smuzhiyun	cmp 	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je join_lists;
96*4882a593Smuzhiyun	/*
97*4882a593Smuzhiyun	 * Now the normal loop.
98*4882a593Smuzhiyun	 */
99*4882a593Smuzhiyun	bmov	SCBPTR, SCB_NEXT_COMPLETE, 2;
100*4882a593Smuzhiyun	cmp 	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . - 1;
101*4882a593Smuzhiyunjoin_lists:
102*4882a593Smuzhiyun	bmov	SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
103*4882a593Smuzhiyun	bmov	COMPLETE_SCB_HEAD, COMPLETE_ON_QFREEZE_HEAD, 2;
104*4882a593Smuzhiyun	mvi	COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL;
105*4882a593Smuzhiyun	jmp	idle_loop_checkbus;
106*4882a593Smuzhiyuncheck_waiting_list:
107*4882a593Smuzhiyun	cmp	WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
108*4882a593Smuzhiyun	/*
109*4882a593Smuzhiyun	 * ENSELO is cleared by a SELDO, so we must test for SELDO
110*4882a593Smuzhiyun	 * one last time.
111*4882a593Smuzhiyun	 */
112*4882a593Smuzhiyun	test	SSTAT0, SELDO jnz select_out;
113*4882a593Smuzhiyun	call	start_selection;
114*4882a593Smuzhiyunidle_loop_checkbus:
115*4882a593Smuzhiyun	test	SSTAT0, SELDO jnz select_out;
116*4882a593SmuzhiyunEND_CRITICAL;
117*4882a593Smuzhiyun	test	SSTAT0, SELDI jnz select_in;
118*4882a593Smuzhiyun	test	SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq;
119*4882a593Smuzhiyun	test	SCSISIGO, ATNO jz idle_loop_check_nonpackreq;
120*4882a593Smuzhiyun	call	unexpected_nonpkt_phase_find_ctxt;
121*4882a593Smuzhiyunidle_loop_check_nonpackreq:
122*4882a593Smuzhiyun	test	SSTAT2, NONPACKREQ jz . + 2;
123*4882a593Smuzhiyun	call	unexpected_nonpkt_phase_find_ctxt;
124*4882a593Smuzhiyun	if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
125*4882a593Smuzhiyun		/*
126*4882a593Smuzhiyun		 * On Rev A. hardware, the busy LED is only
127*4882a593Smuzhiyun		 * turned on automatically during selections
128*4882a593Smuzhiyun		 * and re-selections.  Make the LED status
129*4882a593Smuzhiyun		 * more useful by forcing it to be on so
130*4882a593Smuzhiyun		 * long as one of our data FIFOs is active.
131*4882a593Smuzhiyun		 */
132*4882a593Smuzhiyun		and	A, FIFO0FREE|FIFO1FREE, DFFSTAT;
133*4882a593Smuzhiyun		cmp	A, FIFO0FREE|FIFO1FREE jne . + 3;
134*4882a593Smuzhiyun		and	SBLKCTL, ~DIAGLEDEN|DIAGLEDON;
135*4882a593Smuzhiyun		jmp	. + 2;
136*4882a593Smuzhiyun		or	SBLKCTL, DIAGLEDEN|DIAGLEDON;
137*4882a593Smuzhiyun	}
138*4882a593Smuzhiyun	call	idle_loop_gsfifo_in_scsi_mode;
139*4882a593Smuzhiyun	call	idle_loop_service_fifos;
140*4882a593Smuzhiyun	call	idle_loop_cchan;
141*4882a593Smuzhiyun	jmp	idle_loop;
142*4882a593Smuzhiyun
/*
 * Drain the good-status FIFO (GSFIFO): for each entry, mark the SCB as
 * having received status and complete it unless a data FIFO is still
 * working on the same transaction.
 */
143*4882a593Smuzhiyunidle_loop_gsfifo:
144*4882a593Smuzhiyun	SET_MODE(M_SCSI, M_SCSI)
145*4882a593SmuzhiyunBEGIN_CRITICAL;
146*4882a593Smuzhiyunidle_loop_gsfifo_in_scsi_mode:
147*4882a593Smuzhiyun	test	LQISTAT2, LQIGSAVAIL jz return;
148*4882a593Smuzhiyun	/*
149*4882a593Smuzhiyun	 * We have received good status for this transaction.  There may
150*4882a593Smuzhiyun	 * still be data in our FIFOs draining to the host.  Complete
151*4882a593Smuzhiyun	 * the SCB only if all data has transferred to the host.
152*4882a593Smuzhiyun	 */
153*4882a593Smuzhiyungood_status_IU_done:
154*4882a593Smuzhiyun	bmov	SCBPTR, GSFIFO, 2;
155*4882a593Smuzhiyun	clr	SCB_SCSI_STATUS;
156*4882a593Smuzhiyun	/*
157*4882a593Smuzhiyun	 * If a command completed before an attempted task management
158*4882a593Smuzhiyun	 * function completed, notify the host after disabling any
159*4882a593Smuzhiyun	 * pending select-outs.
160*4882a593Smuzhiyun	 */
161*4882a593Smuzhiyun	test	SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally;
162*4882a593Smuzhiyun	test	SSTAT0, SELDO|SELINGO jnz . + 2;
163*4882a593Smuzhiyun	and	SCSISEQ0, ~ENSELO;
164*4882a593Smuzhiyun	SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
165*4882a593Smuzhiyungsfifo_complete_normally:
166*4882a593Smuzhiyun	or	SCB_CONTROL, STATUS_RCVD;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun	/*
169*4882a593Smuzhiyun	 * Since this status did not consume a FIFO, we have to
170*4882a593Smuzhiyun	 * be a bit more diligent in how we check for FIFOs pertaining
171*4882a593Smuzhiyun	 * to this transaction.  There are two states that a FIFO still
172*4882a593Smuzhiyun	 * transferring data may be in.
173*4882a593Smuzhiyun	 *
174*4882a593Smuzhiyun	 * 1) Configured and draining to the host, with a FIFO handler.
175*4882a593Smuzhiyun	 * 2) Pending cfg4data, fifo not empty.
176*4882a593Smuzhiyun	 *
177*4882a593Smuzhiyun	 * Case 1 can be detected by noticing a non-zero FIFO active
178*4882a593Smuzhiyun	 * count in the SCB.  In this case, we allow the routine servicing
179*4882a593Smuzhiyun	 * the FIFO to complete the SCB.
180*4882a593Smuzhiyun	 *
181*4882a593Smuzhiyun	 * Case 2 implies either a pending or yet to occur save data
182*4882a593Smuzhiyun	 * pointers for this same context in the other FIFO.  So, if
183*4882a593Smuzhiyun	 * we detect case 1, we will properly defer the post of the SCB
184*4882a593Smuzhiyun	 * and achieve the desired result.  The pending cfg4data will
185*4882a593Smuzhiyun	 * notice that status has been received and complete the SCB.
186*4882a593Smuzhiyun	 */
187*4882a593Smuzhiyun	test	SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode;
188*4882a593Smuzhiyun	call	complete;
189*4882a593SmuzhiyunEND_CRITICAL;
190*4882a593Smuzhiyun	jmp	idle_loop_gsfifo_in_scsi_mode;
191*4882a593Smuzhiyun
/*
 * Run the registered longjmp handler, if any, for each data FIFO in
 * turn (DFF0 then DFF1).  A FIFO with LONGJMP_ADDR[1] still marked
 * INVALID_ADDR has no pending work.  Falls through to the shared
 * "return" label.
 */
192*4882a593Smuzhiyunidle_loop_service_fifos:
193*4882a593Smuzhiyun	SET_MODE(M_DFF0, M_DFF0)
194*4882a593SmuzhiyunBEGIN_CRITICAL;
195*4882a593Smuzhiyun	test	LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo;
196*4882a593Smuzhiyun	call	longjmp;
197*4882a593SmuzhiyunEND_CRITICAL;
198*4882a593Smuzhiyunidle_loop_next_fifo:
199*4882a593Smuzhiyun	SET_MODE(M_DFF1, M_DFF1)
200*4882a593SmuzhiyunBEGIN_CRITICAL;
201*4882a593Smuzhiyun	test	LONGJMP_ADDR[1], INVALID_ADDR jz longjmp;
202*4882a593SmuzhiyunEND_CRITICAL;
203*4882a593Smuzhiyunreturn:
204*4882a593Smuzhiyun	ret;
205*4882a593Smuzhiyun
/*
 * Service the command channel: latch any new host mailbox value into
 * LOCAL_HS_MAILBOX, then advance whatever SCB DMA is in flight — a
 * fetch of a new SCB, an upload of a completed SCB, or a qoutfifo
 * completion-entry fill.
 */
206*4882a593Smuzhiyunidle_loop_cchan:
207*4882a593Smuzhiyun	SET_MODE(M_CCHAN, M_CCHAN)
208*4882a593Smuzhiyun	test	QOFF_CTLSTA, HS_MAILBOX_ACT jz	hs_mailbox_empty;
209*4882a593Smuzhiyun	or	QOFF_CTLSTA, HS_MAILBOX_ACT;
210*4882a593Smuzhiyun	mov	LOCAL_HS_MAILBOX, HS_MAILBOX;
211*4882a593Smuzhiyunhs_mailbox_empty:
212*4882a593SmuzhiyunBEGIN_CRITICAL;
213*4882a593Smuzhiyun	test	CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle;
214*4882a593Smuzhiyun	test	CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog;
215*4882a593Smuzhiyun	test	CCSCBCTL, CCSCBDONE jz return;
216*4882a593Smuzhiyun	/* FALLTHROUGH */
217*4882a593Smuzhiyunscbdma_tohost_done:
218*4882a593Smuzhiyun	test	CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone;
219*4882a593Smuzhiyun	/*
220*4882a593Smuzhiyun	 * An SCB has been successfully uploaded to the host.
221*4882a593Smuzhiyun	 * If the SCB was uploaded for some reason other than
222*4882a593Smuzhiyun	 * bad SCSI status (currently only for underruns), we
223*4882a593Smuzhiyun	 * queue the SCB for normal completion.  Otherwise, we
224*4882a593Smuzhiyun	 * wait until any select-out activity has halted, and
225*4882a593Smuzhiyun	 * then queue the completion.
226*4882a593Smuzhiyun	 */
227*4882a593Smuzhiyun	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
228*4882a593Smuzhiyun	bmov	COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
229*4882a593Smuzhiyun	cmp	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . + 2;
230*4882a593Smuzhiyun	mvi	COMPLETE_DMA_SCB_TAIL[1], SCB_LIST_NULL;
231*4882a593Smuzhiyun	test	SCB_SCSI_STATUS, 0xff jz scbdma_queue_completion;
232*4882a593Smuzhiyun	bmov	SCB_NEXT_COMPLETE, COMPLETE_ON_QFREEZE_HEAD, 2;
233*4882a593Smuzhiyun	bmov	COMPLETE_ON_QFREEZE_HEAD, SCBPTR, 2 ret;
234*4882a593Smuzhiyunscbdma_queue_completion:
235*4882a593Smuzhiyun	bmov	SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
236*4882a593Smuzhiyun	bmov	COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
237*4882a593Smuzhiyunfill_qoutfifo_dmadone:
238*4882a593Smuzhiyun	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
239*4882a593Smuzhiyun	call	qoutfifo_updated;
240*4882a593Smuzhiyun	mvi	COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL;
241*4882a593Smuzhiyun	bmov	QOUTFIFO_NEXT_ADDR, SCBHADDR, 4;
242*4882a593Smuzhiyun	test	QOFF_CTLSTA, SDSCB_ROLLOVR jz return;
243*4882a593Smuzhiyun	bmov	QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4;
244*4882a593Smuzhiyun	xor	QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret;
245*4882a593SmuzhiyunEND_CRITICAL;
246*4882a593Smuzhiyun
247*4882a593Smuzhiyunqoutfifo_updated:
248*4882a593Smuzhiyun	/*
249*4882a593Smuzhiyun	 * If there are more commands waiting to be dma'ed
250*4882a593Smuzhiyun	 * to the host, always coalesce.  Otherwise honor the
251*4882a593Smuzhiyun	 * host's wishes.
252*4882a593Smuzhiyun	 */
253*4882a593Smuzhiyun	cmp	COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
254*4882a593Smuzhiyun	cmp	COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
255*4882a593Smuzhiyun	test	LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun	/*
258*4882a593Smuzhiyun	 * If we have relatively few commands outstanding, don't
259*4882a593Smuzhiyun	 * bother waiting for another command to complete.
260*4882a593Smuzhiyun	 */
261*4882a593Smuzhiyun	test	CMDS_PENDING[1], 0xFF jnz coalesce_by_count;
262*4882a593Smuzhiyun	/* Add -1 so that jnc means <= not just < */
263*4882a593Smuzhiyun	add	A, -1, INT_COALESCING_MINCMDS;
264*4882a593Smuzhiyun	add	NONE, A, CMDS_PENDING;
265*4882a593Smuzhiyun	jnc	issue_cmdcmplt;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun	/*
268*4882a593Smuzhiyun	 * If coalescing, only coalesce up to the limit
269*4882a593Smuzhiyun	 * provided by the host driver.
270*4882a593Smuzhiyun	 */
271*4882a593Smuzhiyuncoalesce_by_count:
272*4882a593Smuzhiyun	mov	A, INT_COALESCING_MAXCMDS;
273*4882a593Smuzhiyun	add	NONE, A, INT_COALESCING_CMDCOUNT;
274*4882a593Smuzhiyun	jc	issue_cmdcmplt;
275*4882a593Smuzhiyun	/*
276*4882a593Smuzhiyun	 * If the timer is not currently active,
277*4882a593Smuzhiyun	 * fire it up.
278*4882a593Smuzhiyun	 */
279*4882a593Smuzhiyun	test	INTCTL, SWTMINTMASK jz return;
280*4882a593Smuzhiyun	bmov	SWTIMER, INT_COALESCING_TIMER, 2;
281*4882a593Smuzhiyun	mvi	CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
282*4882a593Smuzhiyun	or	INTCTL, SWTMINTEN|SWTIMER_START;
283*4882a593Smuzhiyun	and	INTCTL, ~SWTMINTMASK ret;
284*4882a593Smuzhiyun
/*
 * Post a CMDCMPLT interrupt to the host, reset the coalescing command
 * count, and mask the software timer interrupt.
 */
285*4882a593Smuzhiyunissue_cmdcmplt:
286*4882a593Smuzhiyun	mvi	INTSTAT, CMDCMPLT;
287*4882a593Smuzhiyun	clr	INT_COALESCING_CMDCOUNT;
288*4882a593Smuzhiyun	or	INTCTL, SWTMINTMASK ret;
289*4882a593Smuzhiyun
/*
 * Finish the DMA of a newly fetched SCB and link it onto the tail of
 * its target's execution queue.  A MK_MESSAGE SCB that cannot be
 * safely batched is deferred (PENDING_MK_MESSAGE) until its queue
 * drains.
 */
290*4882a593SmuzhiyunBEGIN_CRITICAL;
291*4882a593Smuzhiyunfetch_new_scb_inprog:
292*4882a593Smuzhiyun	test	CCSCBCTL, ARRDONE jz return;
293*4882a593Smuzhiyunfetch_new_scb_done:
294*4882a593Smuzhiyun	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
295*4882a593Smuzhiyun	clr	A;
296*4882a593Smuzhiyun	add	CMDS_PENDING, 1;
297*4882a593Smuzhiyun	adc	CMDS_PENDING[1], A;
298*4882a593Smuzhiyun	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
299*4882a593Smuzhiyun		/*
300*4882a593Smuzhiyun		 * "Short Luns" are not placed into outgoing LQ
301*4882a593Smuzhiyun		 * packets in the correct byte order.  Use a full
302*4882a593Smuzhiyun		 * sized lun field instead and fill it with the
303*4882a593Smuzhiyun		 * one byte of lun information we support.
304*4882a593Smuzhiyun		 */
305*4882a593Smuzhiyun		mov	SCB_PKT_LUN[6], SCB_LUN;
306*4882a593Smuzhiyun	}
307*4882a593Smuzhiyun	/*
308*4882a593Smuzhiyun	 * The FIFO use count field is shared with the
309*4882a593Smuzhiyun	 * tag set by the host so that our SCB dma engine
310*4882a593Smuzhiyun	 * knows the correct location to store the SCB.
311*4882a593Smuzhiyun	 * Set it to zero before processing the SCB.
312*4882a593Smuzhiyun	 */
313*4882a593Smuzhiyun	clr	SCB_FIFO_USE_COUNT;
314*4882a593Smuzhiyun	/* Update the next SCB address to download. */
315*4882a593Smuzhiyun	bmov	NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4;
316*4882a593Smuzhiyun	/*
317*4882a593Smuzhiyun	 * NULL out the SCB links since these fields
318*4882a593Smuzhiyun	 * occupy the same location as SCB_NEXT_SCB_BUSADDR.
319*4882a593Smuzhiyun	 */
320*4882a593Smuzhiyun	mvi	SCB_NEXT[1], SCB_LIST_NULL;
321*4882a593Smuzhiyun	mvi	SCB_NEXT2[1], SCB_LIST_NULL;
322*4882a593Smuzhiyun	/* Increment our position in the QINFIFO. */
323*4882a593Smuzhiyun	mov	NONE, SNSCB_QOFF;
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun	/*
326*4882a593Smuzhiyun	 * Save SCBID of this SCB in REG0 since
327*4882a593Smuzhiyun	 * SCBPTR will be clobbered during target
328*4882a593Smuzhiyun	 * list updates.  We also record the SCB's
329*4882a593Smuzhiyun	 * flags so that we can refer to them even
330*4882a593Smuzhiyun	 * after SCBPTR has been changed.
331*4882a593Smuzhiyun	 */
332*4882a593Smuzhiyun	bmov	REG0, SCBPTR, 2;
333*4882a593Smuzhiyun	mov	A, SCB_CONTROL;
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun	/*
336*4882a593Smuzhiyun	 * Find the tail SCB of the execution queue
337*4882a593Smuzhiyun	 * for this target.
338*4882a593Smuzhiyun	 */
339*4882a593Smuzhiyun	shr	SINDEX, 3, SCB_SCSIID;
340*4882a593Smuzhiyun	and	SINDEX, ~0x1;
341*4882a593Smuzhiyun	mvi	SINDEX[1], (WAITING_SCB_TAILS >> 8);
342*4882a593Smuzhiyun	bmov	DINDEX, SINDEX, 2;
343*4882a593Smuzhiyun	bmov	SCBPTR, SINDIR, 2;
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun	/*
346*4882a593Smuzhiyun	 * Update the tail to point to the new SCB.
347*4882a593Smuzhiyun	 */
348*4882a593Smuzhiyun	bmov	DINDIR, REG0, 2;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun	/*
351*4882a593Smuzhiyun	 * If the queue was empty, queue this SCB as
352*4882a593Smuzhiyun	 * the first for this target.
353*4882a593Smuzhiyun	 */
354*4882a593Smuzhiyun	cmp	SCBPTR[1], SCB_LIST_NULL je first_new_target_scb;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun	/*
357*4882a593Smuzhiyun	 * SCBs that want to send messages must always be
358*4882a593Smuzhiyun	 * at the head of their per-target queue so that
359*4882a593Smuzhiyun	 * ATN can be asserted even if the current
360*4882a593Smuzhiyun	 * negotiation agreement is packetized.  If the
361*4882a593Smuzhiyun	 * target queue is empty, the SCB can be queued
362*4882a593Smuzhiyun	 * immediately.  If the queue is not empty, we must
363*4882a593Smuzhiyun	 * wait for it to empty before entering this SCB
364*4882a593Smuzhiyun	 * into the waiting for selection queue.  Otherwise
365*4882a593Smuzhiyun	 * our batching and round-robin selection scheme
366*4882a593Smuzhiyun	 * could allow commands to be queued out of order.
367*4882a593Smuzhiyun	 * To simplify the implementation, we stop pulling
368*4882a593Smuzhiyun	 * new commands from the host until the MK_MESSAGE
369*4882a593Smuzhiyun	 * SCB can be queued to the waiting for selection
370*4882a593Smuzhiyun	 * list.
371*4882a593Smuzhiyun	 */
372*4882a593Smuzhiyun	test	A, MK_MESSAGE jz batch_scb;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun	/*
375*4882a593Smuzhiyun	 * If the last SCB is also a MK_MESSAGE SCB, then
376*4882a593Smuzhiyun	 * order is preserved even if we batch.
377*4882a593Smuzhiyun	 */
378*4882a593Smuzhiyun	test	SCB_CONTROL, MK_MESSAGE jz batch_scb;
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun	/*
381*4882a593Smuzhiyun	 * Defer this SCB and stop fetching new SCBs until
382*4882a593Smuzhiyun	 * it can be queued.  Since the SCB_SCSIID of the
383*4882a593Smuzhiyun	 * tail SCB must be the same as that of the newly
384*4882a593Smuzhiyun	 * queued SCB, there is no need to restore the SCBID
385*4882a593Smuzhiyun	 * here.
386*4882a593Smuzhiyun	 */
387*4882a593Smuzhiyun	or	SEQ_FLAGS2, PENDING_MK_MESSAGE;
388*4882a593Smuzhiyun	bmov	MK_MESSAGE_SCB, REG0, 2;
389*4882a593Smuzhiyun	mov	MK_MESSAGE_SCSIID, SCB_SCSIID ret;
390*4882a593Smuzhiyun
391*4882a593Smuzhiyunbatch_scb:
392*4882a593Smuzhiyun	/*
393*4882a593Smuzhiyun	 * Otherwise just update the previous tail SCB to
394*4882a593Smuzhiyun	 * point to the new tail.
395*4882a593Smuzhiyun	 */
396*4882a593Smuzhiyun	bmov	SCB_NEXT, REG0, 2 ret;
397*4882a593Smuzhiyun
398*4882a593Smuzhiyunfirst_new_target_scb:
399*4882a593Smuzhiyun	/*
400*4882a593Smuzhiyun	 * Append SCB to the tail of the waiting for
401*4882a593Smuzhiyun	 * selection list.
402*4882a593Smuzhiyun	 */
403*4882a593Smuzhiyun	cmp	WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb;
404*4882a593Smuzhiyun	bmov	SCBPTR, WAITING_TID_TAIL, 2;
405*4882a593Smuzhiyun	bmov	SCB_NEXT2, REG0, 2;
406*4882a593Smuzhiyun	bmov	WAITING_TID_TAIL, REG0, 2 ret;
407*4882a593Smuzhiyunfirst_new_scb:
408*4882a593Smuzhiyun	/*
409*4882a593Smuzhiyun	 * Whole list is empty, so the head of
410*4882a593Smuzhiyun	 * the list must be initialized too.
411*4882a593Smuzhiyun	 */
412*4882a593Smuzhiyun	bmov	WAITING_TID_HEAD, REG0, 2;
413*4882a593Smuzhiyun	bmov	WAITING_TID_TAIL, REG0, 2 ret;
414*4882a593SmuzhiyunEND_CRITICAL;
415*4882a593Smuzhiyun
416*4882a593Smuzhiyunscbdma_idle:
417*4882a593Smuzhiyun	/*
418*4882a593Smuzhiyun	 * Don't bother downloading new SCBs to execute
419*4882a593Smuzhiyun	 * if select-outs are currently frozen or we have
420*4882a593Smuzhiyun	 * a MK_MESSAGE SCB waiting to enter the queue.
421*4882a593Smuzhiyun	 */
422*4882a593Smuzhiyun	test	SEQ_FLAGS2, SELECTOUT_QFROZEN|PENDING_MK_MESSAGE
423*4882a593Smuzhiyun		jnz scbdma_no_new_scbs;
424*4882a593SmuzhiyunBEGIN_CRITICAL;
425*4882a593Smuzhiyun	test	QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
426*4882a593Smuzhiyunscbdma_no_new_scbs:
427*4882a593Smuzhiyun	cmp	COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
428*4882a593Smuzhiyun	cmp	COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
429*4882a593Smuzhiyun	/* FALLTHROUGH */
430*4882a593Smuzhiyunfill_qoutfifo:
431*4882a593Smuzhiyun	/*
432*4882a593Smuzhiyun	 * Keep track of the SCBs we are dmaing just
433*4882a593Smuzhiyun	 * in case the DMA fails or is aborted.
434*4882a593Smuzhiyun	 */
435*4882a593Smuzhiyun	bmov	COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2;
436*4882a593Smuzhiyun	mvi	CCSCBCTL, CCSCBRESET;
437*4882a593Smuzhiyun	bmov	SCBHADDR, QOUTFIFO_NEXT_ADDR, 4;
438*4882a593Smuzhiyun	mov	A, QOUTFIFO_NEXT_ADDR;
439*4882a593Smuzhiyun	bmov	SCBPTR, COMPLETE_SCB_HEAD, 2;
440*4882a593Smuzhiyunfill_qoutfifo_loop:
441*4882a593Smuzhiyun	bmov	CCSCBRAM, SCBPTR, 2;
442*4882a593Smuzhiyun	mov	CCSCBRAM, SCB_SGPTR[0];
443*4882a593Smuzhiyun	mov	CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG;
444*4882a593Smuzhiyun	mov	NONE, SDSCB_QOFF;
445*4882a593Smuzhiyun	inc	INT_COALESCING_CMDCOUNT;
446*4882a593Smuzhiyun	add	CMDS_PENDING, -1;
447*4882a593Smuzhiyun	adc	CMDS_PENDING[1], -1;
448*4882a593Smuzhiyun	cmp	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done;
449*4882a593Smuzhiyun	cmp	CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done;
450*4882a593Smuzhiyun	test	QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done;
451*4882a593Smuzhiyun	/*
452*4882a593Smuzhiyun	 * Don't cross an ADB or Cacheline boundary when DMA'ing
453*4882a593Smuzhiyun	 * completion entries.  In PCI mode, at least in 32/33
454*4882a593Smuzhiyun	 * configurations, the SCB DMA engine may lose its place
455*4882a593Smuzhiyun	 * in the data-stream should the target force a retry on
456*4882a593Smuzhiyun	 * something other than an 8byte aligned boundary. In
457*4882a593Smuzhiyun	 * PCI-X mode, we do this to avoid split transactions since
458*4882a593Smuzhiyun	 * many chipsets seem to be unable to format proper split
459*4882a593Smuzhiyun	 * completions to continue the data transfer.
460*4882a593Smuzhiyun	 */
461*4882a593Smuzhiyun	add	SINDEX, A, CCSCBADDR;
462*4882a593Smuzhiyun	test	SINDEX, CACHELINE_MASK jz fill_qoutfifo_done;
463*4882a593Smuzhiyun	bmov	SCBPTR, SCB_NEXT_COMPLETE, 2;
464*4882a593Smuzhiyun	jmp	fill_qoutfifo_loop;
465*4882a593Smuzhiyunfill_qoutfifo_done:
466*4882a593Smuzhiyun	mov	SCBHCNT, CCSCBADDR;
467*4882a593Smuzhiyun	mvi	CCSCBCTL, CCSCBEN|CCSCBRESET;
468*4882a593Smuzhiyun	bmov	COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
469*4882a593Smuzhiyun	mvi	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret;
470*4882a593Smuzhiyun
/* Kick off a download of the next queued SCB from host memory. */
471*4882a593Smuzhiyunfetch_new_scb:
472*4882a593Smuzhiyun	bmov	SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4;
473*4882a593Smuzhiyun	mvi	CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb;
/*
 * Kick off a DMA of the head of the complete-DMA list back to its
 * host SCB address (no CCSCBDIR, so this is a post to the host).
 */
474*4882a593Smuzhiyundma_complete_scb:
475*4882a593Smuzhiyun	bmov	SCBPTR, COMPLETE_DMA_SCB_HEAD, 2;
476*4882a593Smuzhiyun	bmov	SCBHADDR, SCB_BUSADDR, 4;
477*4882a593Smuzhiyun	mvi	CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb;
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun/*
480*4882a593Smuzhiyun * Either post or fetch an SCB from host memory.  The caller
481*4882a593Smuzhiyun * is responsible for polling for transfer completion.
482*4882a593Smuzhiyun *
483*4882a593Smuzhiyun * Prerequisites: Mode == M_CCHAN
484*4882a593Smuzhiyun *		 SINDEX contains CCSCBCTL flags
485*4882a593Smuzhiyun *		 SCBHADDR set to Host SCB address
486*4882a593Smuzhiyun *		 SCBPTR set to SCB src location on "push" operations
487*4882a593Smuzhiyun */
488*4882a593SmuzhiyunSET_SRC_MODE	M_CCHAN;
489*4882a593SmuzhiyunSET_DST_MODE	M_CCHAN;
490*4882a593Smuzhiyundma_scb:
491*4882a593Smuzhiyun	mvi	SCBHCNT, SCB_TRANSFER_SIZE;
492*4882a593Smuzhiyun	mov	CCSCBCTL, SINDEX ret;
493*4882a593Smuzhiyun
/*
 * Register (setjmp) or invoke (longjmp) a deferred handler whose
 * address is kept in LONGJMP_ADDR and executed from the idle loop.
 */
494*4882a593Smuzhiyunsetjmp:
495*4882a593Smuzhiyun	/*
496*4882a593Smuzhiyun	 * At least on the A, a return in the same
497*4882a593Smuzhiyun	 * instruction as the bmov results in a return
498*4882a593Smuzhiyun	 * to the caller, not to the new address at the
499*4882a593Smuzhiyun	 * top of the stack.  Since we want the latter
500*4882a593Smuzhiyun	 * (we use setjmp to register a handler from an
501*4882a593Smuzhiyun	 * interrupt context but not invoke that handler
502*4882a593Smuzhiyun	 * until we return to our idle loop), use a
503*4882a593Smuzhiyun	 * separate ret instruction.
504*4882a593Smuzhiyun	 */
505*4882a593Smuzhiyun	bmov	LONGJMP_ADDR, STACK, 2;
506*4882a593Smuzhiyun	ret;
507*4882a593Smuzhiyunsetjmp_inline:
508*4882a593Smuzhiyun	bmov	LONGJMP_ADDR, STACK, 2;
509*4882a593Smuzhiyunlongjmp:
510*4882a593Smuzhiyun	bmov	STACK, LONGJMP_ADDR, 2 ret;
511*4882a593SmuzhiyunEND_CRITICAL;
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun/*************************** Chip Bug Work Arounds ****************************/
514*4882a593Smuzhiyun/*
515*4882a593Smuzhiyun * Must disable interrupts when setting the mode pointer
516*4882a593Smuzhiyun * register as an interrupt occurring mid update will
517*4882a593Smuzhiyun * fail to store the new mode value for restoration on
518*4882a593Smuzhiyun * an iret.
519*4882a593Smuzhiyun */
520*4882a593Smuzhiyunif ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
521*4882a593Smuzhiyunset_mode_work_around:
522*4882a593Smuzhiyun	mvi	SEQINTCTL, INTVEC1DSL;
523*4882a593Smuzhiyun	mov	MODE_PTR, SINDEX;
524*4882a593Smuzhiyun	clr	SEQINTCTL ret;
525*4882a593Smuzhiyun}
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun
/*
 * Post the sequencer interrupt code passed in SINDEX, then immediately
 * write NO_SEQINT so a colliding hardware interrupt cannot leave a
 * stale code latched (interrupt-collision workaround).
 */
528*4882a593Smuzhiyunif ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
529*4882a593Smuzhiyunset_seqint_work_around:
530*4882a593Smuzhiyun	mov	SEQINTCODE, SINDEX;
531*4882a593Smuzhiyun	mvi	SEQINTCODE, NO_SEQINT ret;
532*4882a593Smuzhiyun}
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun/************************ Packetized LongJmp Routines *************************/
/*
 * Begin a selection for the SCB at the head of the waiting-TID list,
 * forcing ATN when the SCB carries MK_MESSAGE.
 */
535*4882a593SmuzhiyunSET_SRC_MODE	M_SCSI;
536*4882a593SmuzhiyunSET_DST_MODE	M_SCSI;
537*4882a593Smuzhiyunstart_selection:
538*4882a593SmuzhiyunBEGIN_CRITICAL;
539*4882a593Smuzhiyun	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
540*4882a593Smuzhiyun		/*
541*4882a593Smuzhiyun		 * Razor #494
542*4882a593Smuzhiyun		 * Rev A hardware fails to update LAST/CURR/NEXTSCB
543*4882a593Smuzhiyun		 * correctly after a packetized selection in several
544*4882a593Smuzhiyun		 * situations:
545*4882a593Smuzhiyun		 *
546*4882a593Smuzhiyun		 * 1) If only one command existed in the queue, the
547*4882a593Smuzhiyun		 *    LAST/CURR/NEXTSCB are unchanged.
548*4882a593Smuzhiyun		 *
549*4882a593Smuzhiyun		 * 2) In a non QAS, protocol allowed phase change,
550*4882a593Smuzhiyun		 *    the queue is shifted 1 too far.  LASTSCB is
551*4882a593Smuzhiyun		 *    the last SCB that was correctly processed.
552*4882a593Smuzhiyun		 *
553*4882a593Smuzhiyun		 * 3) In the QAS case, if the full list of commands
554*4882a593Smuzhiyun		 *    was successfully sent, NEXTSCB is NULL and neither
555*4882a593Smuzhiyun		 *    CURRSCB nor LASTSCB can be trusted.  We must
556*4882a593Smuzhiyun		 *    manually walk the list counting MAXCMDCNT elements
557*4882a593Smuzhiyun		 *    to find the last SCB that was sent correctly.
558*4882a593Smuzhiyun		 *
559*4882a593Smuzhiyun		 * To simplify the workaround for this bug in SELDO
560*4882a593Smuzhiyun		 * handling, we initialize LASTSCB prior to enabling
561*4882a593Smuzhiyun		 * selection so we can rely on it even for case #1 above.
562*4882a593Smuzhiyun		 */
563*4882a593Smuzhiyun		bmov	LASTSCB, WAITING_TID_HEAD, 2;
564*4882a593Smuzhiyun	}
565*4882a593Smuzhiyun	bmov	CURRSCB, WAITING_TID_HEAD, 2;
566*4882a593Smuzhiyun	bmov	SCBPTR, WAITING_TID_HEAD, 2;
567*4882a593Smuzhiyun	shr	SELOID, 4, SCB_SCSIID;
568*4882a593Smuzhiyun	/*
569*4882a593Smuzhiyun	 * If we want to send a message to the device, ensure
570*4882a593Smuzhiyun	 * we are selecting with atn regardless of our packetized
571*4882a593Smuzhiyun	 * agreement.  Since SPI4 only allows target reset or PPR
572*4882a593Smuzhiyun	 * messages if this is a packetized connection, the change
573*4882a593Smuzhiyun	 * to our negotiation table entry for this selection will
574*4882a593Smuzhiyun	 * be cleared when the message is acted on.
575*4882a593Smuzhiyun	 */
576*4882a593Smuzhiyun	test	SCB_CONTROL, MK_MESSAGE jz . + 3;
577*4882a593Smuzhiyun	mov	NEGOADDR, SELOID;
578*4882a593Smuzhiyun	or	NEGCONOPTS, ENAUTOATNO;
579*4882a593Smuzhiyun	or	SCSISEQ0, ENSELO ret;
580*4882a593SmuzhiyunEND_CRITICAL;
581*4882a593Smuzhiyun
582*4882a593Smuzhiyun/*
583*4882a593Smuzhiyun * Allocate a FIFO for a non-packetized transaction.
584*4882a593Smuzhiyun * In RevA hardware, both FIFOs must be free before we
585*4882a593Smuzhiyun * can allocate a FIFO for a non-packetized transaction.
586*4882a593Smuzhiyun */
587*4882a593Smuzhiyunallocate_fifo_loop:
588*4882a593Smuzhiyun	/*
589*4882a593Smuzhiyun	 * Do whatever work is required to free a FIFO.
590*4882a593Smuzhiyun	 */
591*4882a593Smuzhiyun	call	idle_loop_service_fifos;
592*4882a593Smuzhiyun	SET_MODE(M_SCSI, M_SCSI)
593*4882a593Smuzhiyunallocate_fifo:
594*4882a593Smuzhiyun	if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) {
595*4882a593Smuzhiyun		and	A, FIFO0FREE|FIFO1FREE, DFFSTAT;
596*4882a593Smuzhiyun		cmp	A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop;
597*4882a593Smuzhiyun	} else {
598*4882a593Smuzhiyun		test	DFFSTAT, FIFO1FREE jnz allocate_fifo1;
599*4882a593Smuzhiyun		test	DFFSTAT, FIFO0FREE jz allocate_fifo_loop;
600*4882a593Smuzhiyun		mvi	DFFSTAT, B_CURRFIFO_0;
601*4882a593Smuzhiyun		SET_MODE(M_DFF0, M_DFF0)
602*4882a593Smuzhiyun		bmov	SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
603*4882a593Smuzhiyun	}
604*4882a593SmuzhiyunSET_SRC_MODE	M_SCSI;
605*4882a593SmuzhiyunSET_DST_MODE	M_SCSI;
606*4882a593Smuzhiyunallocate_fifo1:
607*4882a593Smuzhiyun	mvi	DFFSTAT, CURRFIFO_1;
608*4882a593Smuzhiyun	SET_MODE(M_DFF1, M_DFF1)
609*4882a593Smuzhiyun	bmov	SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
610*4882a593Smuzhiyun
611*4882a593Smuzhiyun/*
612*4882a593Smuzhiyun * We have been reselected as an initiator
613*4882a593Smuzhiyun * or selected as a target.
614*4882a593Smuzhiyun */
615*4882a593SmuzhiyunSET_SRC_MODE	M_SCSI;
616*4882a593SmuzhiyunSET_DST_MODE	M_SCSI;
617*4882a593Smuzhiyunselect_in:
618*4882a593Smuzhiyun	if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
619*4882a593Smuzhiyun		/*
620*4882a593Smuzhiyun		 * On Rev A. hardware, the busy LED is only
621*4882a593Smuzhiyun		 * turned on automaically during selections
622*4882a593Smuzhiyun		 * and re-selections.  Make the LED status
623*4882a593Smuzhiyun		 * more useful by forcing it to be on from
624*4882a593Smuzhiyun		 * the point of selection until our idle
625*4882a593Smuzhiyun		 * loop determines that neither of our FIFOs
626*4882a593Smuzhiyun		 * are busy.  This handles the non-packetized
627*4882a593Smuzhiyun		 * case nicely as we will not return to the
628*4882a593Smuzhiyun		 * idle loop until the busfree at the end of
629*4882a593Smuzhiyun		 * each transaction.
630*4882a593Smuzhiyun		 */
631*4882a593Smuzhiyun		or	SBLKCTL, DIAGLEDEN|DIAGLEDON;
632*4882a593Smuzhiyun	}
633*4882a593Smuzhiyun	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
634*4882a593Smuzhiyun		/*
635*4882a593Smuzhiyun		 * Test to ensure that the bus has not
636*4882a593Smuzhiyun		 * already gone free prior to clearing
637*4882a593Smuzhiyun		 * any stale busfree status.  This avoids
638*4882a593Smuzhiyun		 * a window whereby a busfree just after
639*4882a593Smuzhiyun		 * a selection could be missed.
640*4882a593Smuzhiyun		 */
641*4882a593Smuzhiyun		test	SCSISIGI, BSYI jz . + 2;
642*4882a593Smuzhiyun		mvi	CLRSINT1,CLRBUSFREE;
643*4882a593Smuzhiyun		or	SIMODE1, ENBUSFREE;
644*4882a593Smuzhiyun	}
645*4882a593Smuzhiyun	or	SXFRCTL0, SPIOEN;
646*4882a593Smuzhiyun	and	SAVED_SCSIID, SELID_MASK, SELID;
647*4882a593Smuzhiyun	and	A, OID, IOWNID;
648*4882a593Smuzhiyun	or	SAVED_SCSIID, A;
649*4882a593Smuzhiyun	mvi	CLRSINT0, CLRSELDI;
650*4882a593Smuzhiyun	jmp	ITloop;
651*4882a593Smuzhiyun
652*4882a593Smuzhiyun/*
653*4882a593Smuzhiyun * We have successfully selected out.
654*4882a593Smuzhiyun *
655*4882a593Smuzhiyun * Clear SELDO.
656*4882a593Smuzhiyun * Dequeue all SCBs sent from the waiting queue
657*4882a593Smuzhiyun * Requeue all SCBs *not* sent to the tail of the waiting queue
658*4882a593Smuzhiyun * Take Razor #494 into account for above.
659*4882a593Smuzhiyun *
660*4882a593Smuzhiyun * In Packetized Mode:
661*4882a593Smuzhiyun *	Return to the idle loop.  Our interrupt handler will take
662*4882a593Smuzhiyun *	care of any incoming L_Qs.
663*4882a593Smuzhiyun *
664*4882a593Smuzhiyun * In Non-Packetize Mode:
665*4882a593Smuzhiyun *	Continue to our normal state machine.
666*4882a593Smuzhiyun */
667*4882a593SmuzhiyunSET_SRC_MODE	M_SCSI;
668*4882a593SmuzhiyunSET_DST_MODE	M_SCSI;
669*4882a593Smuzhiyunselect_out:
670*4882a593SmuzhiyunBEGIN_CRITICAL;
671*4882a593Smuzhiyun	if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
672*4882a593Smuzhiyun		/*
673*4882a593Smuzhiyun		 * On Rev A. hardware, the busy LED is only
674*4882a593Smuzhiyun		 * turned on automaically during selections
675*4882a593Smuzhiyun		 * and re-selections.  Make the LED status
676*4882a593Smuzhiyun		 * more useful by forcing it to be on from
677*4882a593Smuzhiyun		 * the point of re-selection until our idle
678*4882a593Smuzhiyun		 * loop determines that neither of our FIFOs
679*4882a593Smuzhiyun		 * are busy.  This handles the non-packetized
680*4882a593Smuzhiyun		 * case nicely as we will not return to the
681*4882a593Smuzhiyun		 * idle loop until the busfree at the end of
682*4882a593Smuzhiyun		 * each transaction.
683*4882a593Smuzhiyun		 */
684*4882a593Smuzhiyun		or	SBLKCTL, DIAGLEDEN|DIAGLEDON;
685*4882a593Smuzhiyun	}
686*4882a593Smuzhiyun	/* Clear out all SCBs that have been successfully sent. */
687*4882a593Smuzhiyun	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
688*4882a593Smuzhiyun		/*
689*4882a593Smuzhiyun		 * For packetized, the LQO manager clears ENSELO on
690*4882a593Smuzhiyun		 * the assertion of SELDO.  If we are non-packetized,
691*4882a593Smuzhiyun		 * LASTSCB and CURRSCB are accurate.
692*4882a593Smuzhiyun		 */
693*4882a593Smuzhiyun		test	SCSISEQ0, ENSELO jnz use_lastscb;
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun		/*
696*4882a593Smuzhiyun		 * The update is correct for LQOSTAT1 errors.  All
697*4882a593Smuzhiyun		 * but LQOBUSFREE are handled by kernel interrupts.
698*4882a593Smuzhiyun		 * If we see LQOBUSFREE, return to the idle loop.
699*4882a593Smuzhiyun		 * Once we are out of the select_out critical section,
700*4882a593Smuzhiyun		 * the kernel will cleanup the LQOBUSFREE and we will
701*4882a593Smuzhiyun		 * eventually restart the selection if appropriate.
702*4882a593Smuzhiyun		 */
703*4882a593Smuzhiyun		test	LQOSTAT1, LQOBUSFREE jnz idle_loop;
704*4882a593Smuzhiyun
705*4882a593Smuzhiyun		/*
706*4882a593Smuzhiyun		 * On a phase change oustside of packet boundaries,
707*4882a593Smuzhiyun		 * LASTSCB points to the currently active SCB context
708*4882a593Smuzhiyun		 * on the bus.
709*4882a593Smuzhiyun		 */
710*4882a593Smuzhiyun		test	LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb;
711*4882a593Smuzhiyun
712*4882a593Smuzhiyun		/*
713*4882a593Smuzhiyun		 * If the hardware has traversed the whole list, NEXTSCB
714*4882a593Smuzhiyun		 * will be NULL, CURRSCB and LASTSCB cannot be trusted,
715*4882a593Smuzhiyun		 * but MAXCMDCNT is accurate.  If we stop part way through
716*4882a593Smuzhiyun		 * the list or only had one command to issue, NEXTSCB[1] is
717*4882a593Smuzhiyun		 * not NULL and LASTSCB is the last command to go out.
718*4882a593Smuzhiyun		 */
719*4882a593Smuzhiyun		cmp	NEXTSCB[1], SCB_LIST_NULL jne use_lastscb;
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun		/*
722*4882a593Smuzhiyun		 * Brute force walk.
723*4882a593Smuzhiyun		 */
724*4882a593Smuzhiyun		bmov	SCBPTR, WAITING_TID_HEAD, 2;
725*4882a593Smuzhiyun		mvi	SEQINTCTL, INTVEC1DSL;
726*4882a593Smuzhiyun		mvi	MODE_PTR, MK_MODE(M_CFG, M_CFG);
727*4882a593Smuzhiyun		mov	A, MAXCMDCNT;
728*4882a593Smuzhiyun		mvi	MODE_PTR, MK_MODE(M_SCSI, M_SCSI);
729*4882a593Smuzhiyun		clr	SEQINTCTL;
730*4882a593Smuzhiyunfind_lastscb_loop:
731*4882a593Smuzhiyun		dec	A;
732*4882a593Smuzhiyun		test	A, 0xFF jz found_last_sent_scb;
733*4882a593Smuzhiyun		bmov	SCBPTR, SCB_NEXT, 2;
734*4882a593Smuzhiyun		jmp	find_lastscb_loop;
735*4882a593Smuzhiyunuse_lastscb:
736*4882a593Smuzhiyun		bmov	SCBPTR, LASTSCB, 2;
737*4882a593Smuzhiyunfound_last_sent_scb:
738*4882a593Smuzhiyun		bmov	CURRSCB, SCBPTR, 2;
739*4882a593Smuzhiyuncurscb_ww_done:
740*4882a593Smuzhiyun	} else {
741*4882a593Smuzhiyun		bmov	SCBPTR, CURRSCB, 2;
742*4882a593Smuzhiyun	}
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun	/*
745*4882a593Smuzhiyun	 * The whole list made it.  Clear our tail pointer to indicate
746*4882a593Smuzhiyun	 * that the per-target selection queue is now empty.
747*4882a593Smuzhiyun	 */
748*4882a593Smuzhiyun	cmp	SCB_NEXT[1], SCB_LIST_NULL je select_out_clear_tail;
749*4882a593Smuzhiyun
750*4882a593Smuzhiyun	/*
751*4882a593Smuzhiyun	 * Requeue any SCBs not sent, to the tail of the waiting Q.
752*4882a593Smuzhiyun	 * We know that neither the per-TID list nor the list of
753*4882a593Smuzhiyun	 * TIDs is empty.  Use this knowledge to our advantage and
754*4882a593Smuzhiyun	 * queue the remainder to the tail of the global execution
755*4882a593Smuzhiyun	 * queue.
756*4882a593Smuzhiyun	 */
757*4882a593Smuzhiyun	bmov	REG0, SCB_NEXT, 2;
758*4882a593Smuzhiyunselect_out_queue_remainder:
759*4882a593Smuzhiyun	bmov	SCBPTR, WAITING_TID_TAIL, 2;
760*4882a593Smuzhiyun	bmov	SCB_NEXT2, REG0, 2;
761*4882a593Smuzhiyun	bmov	WAITING_TID_TAIL, REG0, 2;
762*4882a593Smuzhiyun	jmp	select_out_inc_tid_q;
763*4882a593Smuzhiyun
764*4882a593Smuzhiyunselect_out_clear_tail:
765*4882a593Smuzhiyun	/*
766*4882a593Smuzhiyun	 * Queue any pending MK_MESSAGE SCB for this target now
767*4882a593Smuzhiyun	 * that the queue is empty.
768*4882a593Smuzhiyun	 */
769*4882a593Smuzhiyun	test	SEQ_FLAGS2, PENDING_MK_MESSAGE jz select_out_no_mk_message_scb;
770*4882a593Smuzhiyun	mov	A, MK_MESSAGE_SCSIID;
771*4882a593Smuzhiyun	cmp	SCB_SCSIID, A jne select_out_no_mk_message_scb;
772*4882a593Smuzhiyun	and	SEQ_FLAGS2, ~PENDING_MK_MESSAGE;
773*4882a593Smuzhiyun	bmov	REG0, MK_MESSAGE_SCB, 2;
774*4882a593Smuzhiyun	jmp select_out_queue_remainder;
775*4882a593Smuzhiyun
776*4882a593Smuzhiyunselect_out_no_mk_message_scb:
777*4882a593Smuzhiyun	/*
778*4882a593Smuzhiyun	 * Clear this target's execution tail and increment the queue.
779*4882a593Smuzhiyun	 */
780*4882a593Smuzhiyun	shr	DINDEX, 3, SCB_SCSIID;
781*4882a593Smuzhiyun	or	DINDEX, 1;	/* Want only the second byte */
782*4882a593Smuzhiyun	mvi	DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
783*4882a593Smuzhiyun	mvi	DINDIR, SCB_LIST_NULL;
784*4882a593Smuzhiyunselect_out_inc_tid_q:
785*4882a593Smuzhiyun	bmov	SCBPTR, WAITING_TID_HEAD, 2;
786*4882a593Smuzhiyun	bmov	WAITING_TID_HEAD, SCB_NEXT2, 2;
787*4882a593Smuzhiyun	cmp	WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2;
788*4882a593Smuzhiyun	mvi	WAITING_TID_TAIL[1], SCB_LIST_NULL;
789*4882a593Smuzhiyun	bmov	SCBPTR, CURRSCB, 2;
790*4882a593Smuzhiyun	mvi	CLRSINT0, CLRSELDO;
791*4882a593Smuzhiyun	test	LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_mode_cleared;
792*4882a593Smuzhiyun	test	LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_mode_cleared;
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun	/*
795*4882a593Smuzhiyun	 * If this is a packetized connection, return to our
796*4882a593Smuzhiyun	 * idle_loop and let our interrupt handler deal with
797*4882a593Smuzhiyun	 * any connection setup/teardown issues.  The only
798*4882a593Smuzhiyun	 * exceptions are the case of MK_MESSAGE and task management
799*4882a593Smuzhiyun	 * SCBs.
800*4882a593Smuzhiyun	 */
801*4882a593Smuzhiyun	if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) {
802*4882a593Smuzhiyun		/*
803*4882a593Smuzhiyun		 * In the A, the LQO manager transitions to LQOSTOP0 even if
804*4882a593Smuzhiyun		 * we have selected out with ATN asserted and the target
805*4882a593Smuzhiyun		 * REQs in a non-packet phase.
806*4882a593Smuzhiyun		 */
807*4882a593Smuzhiyun		test 	SCB_CONTROL, MK_MESSAGE jz select_out_no_message;
808*4882a593Smuzhiyun		test	SCSISIGO, ATNO jnz select_out_non_packetized;
809*4882a593Smuzhiyunselect_out_no_message:
810*4882a593Smuzhiyun	}
811*4882a593Smuzhiyun	test	LQOSTAT2, LQOSTOP0 jz select_out_non_packetized;
812*4882a593Smuzhiyun	test	SCB_TASK_MANAGEMENT, 0xFF jz idle_loop;
813*4882a593Smuzhiyun	SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE)
814*4882a593Smuzhiyun	jmp	idle_loop;
815*4882a593Smuzhiyun
816*4882a593Smuzhiyunselect_out_non_packetized:
817*4882a593Smuzhiyun	/* Non packetized request. */
818*4882a593Smuzhiyun	and     SCSISEQ0, ~ENSELO;
819*4882a593Smuzhiyun	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
820*4882a593Smuzhiyun		/*
821*4882a593Smuzhiyun		 * Test to ensure that the bus has not
822*4882a593Smuzhiyun		 * already gone free prior to clearing
823*4882a593Smuzhiyun		 * any stale busfree status.  This avoids
824*4882a593Smuzhiyun		 * a window whereby a busfree just after
825*4882a593Smuzhiyun		 * a selection could be missed.
826*4882a593Smuzhiyun		 */
827*4882a593Smuzhiyun		test	SCSISIGI, BSYI jz . + 2;
828*4882a593Smuzhiyun		mvi	CLRSINT1,CLRBUSFREE;
829*4882a593Smuzhiyun		or	SIMODE1, ENBUSFREE;
830*4882a593Smuzhiyun	}
831*4882a593Smuzhiyun	mov	SAVED_SCSIID, SCB_SCSIID;
832*4882a593Smuzhiyun	mov	SAVED_LUN, SCB_LUN;
833*4882a593Smuzhiyun	mvi	SEQ_FLAGS, NO_CDB_SENT;
834*4882a593SmuzhiyunEND_CRITICAL;
835*4882a593Smuzhiyun	or	SXFRCTL0, SPIOEN;
836*4882a593Smuzhiyun
837*4882a593Smuzhiyun	/*
838*4882a593Smuzhiyun	 * As soon as we get a successful selection, the target
839*4882a593Smuzhiyun	 * should go into the message out phase since we have ATN
840*4882a593Smuzhiyun	 * asserted.
841*4882a593Smuzhiyun	 */
842*4882a593Smuzhiyun	mvi	MSG_OUT, MSG_IDENTIFYFLAG;
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun	/*
845*4882a593Smuzhiyun	 * Main loop for information transfer phases.  Wait for the
846*4882a593Smuzhiyun	 * target to assert REQ before checking MSG, C/D and I/O for
847*4882a593Smuzhiyun	 * the bus phase.
848*4882a593Smuzhiyun	 */
849*4882a593Smuzhiyunmesgin_phasemis:
850*4882a593SmuzhiyunITloop:
851*4882a593Smuzhiyun	call	phase_lock;
852*4882a593Smuzhiyun
853*4882a593Smuzhiyun	mov	A, LASTPHASE;
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun	test	A, ~P_DATAIN_DT	jz p_data;
856*4882a593Smuzhiyun	cmp	A,P_COMMAND	je p_command;
857*4882a593Smuzhiyun	cmp	A,P_MESGOUT	je p_mesgout;
858*4882a593Smuzhiyun	cmp	A,P_STATUS	je p_status;
859*4882a593Smuzhiyun	cmp	A,P_MESGIN	je p_mesgin;
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun	SET_SEQINTCODE(BAD_PHASE)
862*4882a593Smuzhiyun	jmp	ITloop;			/* Try reading the bus again. */
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun/*
865*4882a593Smuzhiyun * Command phase.  Set up the DMA registers and let 'er rip.
866*4882a593Smuzhiyun */
867*4882a593Smuzhiyunp_command:
868*4882a593Smuzhiyun	test	SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
869*4882a593Smuzhiyun	SET_SEQINTCODE(PROTO_VIOLATION)
870*4882a593Smuzhiyunp_command_okay:
871*4882a593Smuzhiyun	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
872*4882a593Smuzhiyun		jnz p_command_allocate_fifo;
873*4882a593Smuzhiyun	/*
874*4882a593Smuzhiyun	 * Command retry.  Free our current FIFO and
875*4882a593Smuzhiyun	 * re-allocate a FIFO so transfer state is
876*4882a593Smuzhiyun	 * reset.
877*4882a593Smuzhiyun	 */
878*4882a593SmuzhiyunSET_SRC_MODE	M_DFF1;
879*4882a593SmuzhiyunSET_DST_MODE	M_DFF1;
880*4882a593Smuzhiyun	mvi	DFFSXFRCTL, RSTCHN|CLRSHCNT;
881*4882a593Smuzhiyun	SET_MODE(M_SCSI, M_SCSI)
882*4882a593Smuzhiyunp_command_allocate_fifo:
883*4882a593Smuzhiyun	bmov	ALLOCFIFO_SCBPTR, SCBPTR, 2;
884*4882a593Smuzhiyun	call	allocate_fifo;
885*4882a593SmuzhiyunSET_SRC_MODE	M_DFF1;
886*4882a593SmuzhiyunSET_DST_MODE	M_DFF1;
887*4882a593Smuzhiyun	add	NONE, -17, SCB_CDB_LEN;
888*4882a593Smuzhiyun	jnc	p_command_embedded;
889*4882a593Smuzhiyunp_command_from_host:
890*4882a593Smuzhiyun	bmov	HADDR[0], SCB_HOST_CDB_PTR, 9;
891*4882a593Smuzhiyun	mvi	SG_CACHE_PRE, LAST_SEG;
892*4882a593Smuzhiyun	mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
893*4882a593Smuzhiyun	jmp	p_command_xfer;
894*4882a593Smuzhiyunp_command_embedded:
895*4882a593Smuzhiyun	bmov	SHCNT[0], SCB_CDB_LEN,  1;
896*4882a593Smuzhiyun	bmov	DFDAT, SCB_CDB_STORE, 16;
897*4882a593Smuzhiyun	mvi	DFCNTRL, SCSIEN;
898*4882a593Smuzhiyunp_command_xfer:
899*4882a593Smuzhiyun	and	SEQ_FLAGS, ~NO_CDB_SENT;
900*4882a593Smuzhiyun	if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) {
901*4882a593Smuzhiyun		/*
902*4882a593Smuzhiyun		 * To speed up CDB delivery in Rev B, all CDB acks
903*4882a593Smuzhiyun		 * are "released" to the output sync as soon as the
904*4882a593Smuzhiyun		 * command phase starts.  There is only one problem
905*4882a593Smuzhiyun		 * with this approach.  If the target changes phase
906*4882a593Smuzhiyun		 * before all data are sent, we have left over acks
907*4882a593Smuzhiyun		 * that can go out on the bus in a data phase.  Due
908*4882a593Smuzhiyun		 * to other chip contraints, this only happens if
909*4882a593Smuzhiyun		 * the target goes to data-in, but if the acks go
910*4882a593Smuzhiyun		 * out before we can test SDONE, we'll think that
911*4882a593Smuzhiyun		 * the transfer has completed successfully.  Work
912*4882a593Smuzhiyun		 * around this by taking advantage of the 400ns or
913*4882a593Smuzhiyun		 * 800ns dead time between command phase and the REQ
914*4882a593Smuzhiyun		 * of the new phase.  If the transfer has completed
915*4882a593Smuzhiyun		 * successfully, SCSIEN should fall *long* before we
916*4882a593Smuzhiyun		 * see a phase change.  We thus treat any phasemiss
917*4882a593Smuzhiyun		 * that occurs before SCSIEN falls as an incomplete
918*4882a593Smuzhiyun		 * transfer.
919*4882a593Smuzhiyun		 */
920*4882a593Smuzhiyun		test	SSTAT1, PHASEMIS jnz p_command_xfer_failed;
921*4882a593Smuzhiyun		test	DFCNTRL, SCSIEN jnz . - 1;
922*4882a593Smuzhiyun	} else {
923*4882a593Smuzhiyun		test	DFCNTRL, SCSIEN jnz .;
924*4882a593Smuzhiyun	}
925*4882a593Smuzhiyun	/*
926*4882a593Smuzhiyun	 * DMA Channel automatically disabled.
927*4882a593Smuzhiyun	 * Don't allow a data phase if the command
928*4882a593Smuzhiyun	 * was not fully transferred.
929*4882a593Smuzhiyun	 */
930*4882a593Smuzhiyun	test	SSTAT2, SDONE jnz ITloop;
931*4882a593Smuzhiyunp_command_xfer_failed:
932*4882a593Smuzhiyun	or	SEQ_FLAGS, NO_CDB_SENT;
933*4882a593Smuzhiyun	jmp	ITloop;
934*4882a593Smuzhiyun
935*4882a593Smuzhiyun
936*4882a593Smuzhiyun/*
937*4882a593Smuzhiyun * Status phase.  Wait for the data byte to appear, then read it
938*4882a593Smuzhiyun * and store it into the SCB.
939*4882a593Smuzhiyun */
940*4882a593SmuzhiyunSET_SRC_MODE	M_SCSI;
941*4882a593SmuzhiyunSET_DST_MODE	M_SCSI;
942*4882a593Smuzhiyunp_status:
943*4882a593Smuzhiyun	test	SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation;
944*4882a593Smuzhiyunp_status_okay:
945*4882a593Smuzhiyun	mov	SCB_SCSI_STATUS, SCSIDAT;
946*4882a593Smuzhiyun	or	SCB_CONTROL, STATUS_RCVD;
947*4882a593Smuzhiyun	jmp	ITloop;
948*4882a593Smuzhiyun
949*4882a593Smuzhiyun/*
950*4882a593Smuzhiyun * Message out phase.  If MSG_OUT is MSG_IDENTIFYFLAG, build a full
951*4882a593Smuzhiyun * indentify message sequence and send it to the target.  The host may
952*4882a593Smuzhiyun * override this behavior by setting the MK_MESSAGE bit in the SCB
953*4882a593Smuzhiyun * control byte.  This will cause us to interrupt the host and allow
954*4882a593Smuzhiyun * it to handle the message phase completely on its own.  If the bit
955*4882a593Smuzhiyun * associated with this target is set, we will also interrupt the host,
956*4882a593Smuzhiyun * thereby allowing it to send a message on the next selection regardless
957*4882a593Smuzhiyun * of the transaction being sent.
958*4882a593Smuzhiyun *
959*4882a593Smuzhiyun * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message.
960*4882a593Smuzhiyun * This is done to allow the host to send messages outside of an identify
961*4882a593Smuzhiyun * sequence while protecting the seqencer from testing the MK_MESSAGE bit
962*4882a593Smuzhiyun * on an SCB that might not be for the current nexus. (For example, a
963*4882a593Smuzhiyun * BDR message in response to a bad reselection would leave us pointed to
964*4882a593Smuzhiyun * an SCB that doesn't have anything to do with the current target).
965*4882a593Smuzhiyun *
966*4882a593Smuzhiyun * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
967*4882a593Smuzhiyun * bus device reset).
968*4882a593Smuzhiyun *
969*4882a593Smuzhiyun * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
970*4882a593Smuzhiyun * in case the target decides to put us in this phase for some strange
971*4882a593Smuzhiyun * reason.
972*4882a593Smuzhiyun */
973*4882a593Smuzhiyunp_mesgout_retry:
974*4882a593Smuzhiyun	/* Turn on ATN for the retry */
975*4882a593Smuzhiyun	mvi	SCSISIGO, ATNO;
976*4882a593Smuzhiyunp_mesgout:
977*4882a593Smuzhiyun	mov	SINDEX, MSG_OUT;
978*4882a593Smuzhiyun	cmp	SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
979*4882a593Smuzhiyun	test	SCB_CONTROL,MK_MESSAGE	jnz host_message_loop;
980*4882a593Smuzhiyunp_mesgout_identify:
981*4882a593Smuzhiyun	or	SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN;
982*4882a593Smuzhiyun	test	SCB_CONTROL, DISCENB jnz . + 2;
983*4882a593Smuzhiyun	and	SINDEX, ~DISCENB;
984*4882a593Smuzhiyun/*
985*4882a593Smuzhiyun * Send a tag message if TAG_ENB is set in the SCB control block.
986*4882a593Smuzhiyun * Use SCB_NONPACKET_TAG as the tag value.
987*4882a593Smuzhiyun */
988*4882a593Smuzhiyunp_mesgout_tag:
989*4882a593Smuzhiyun	test	SCB_CONTROL,TAG_ENB jz  p_mesgout_onebyte;
990*4882a593Smuzhiyun	mov	SCSIDAT, SINDEX;	/* Send the identify message */
991*4882a593Smuzhiyun	call	phase_lock;
992*4882a593Smuzhiyun	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
993*4882a593Smuzhiyun	and	SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
994*4882a593Smuzhiyun	call	phase_lock;
995*4882a593Smuzhiyun	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
996*4882a593Smuzhiyun	mov	SCBPTR jmp p_mesgout_onebyte;
997*4882a593Smuzhiyun/*
998*4882a593Smuzhiyun * Interrupt the driver, and allow it to handle this message
999*4882a593Smuzhiyun * phase and any required retries.
1000*4882a593Smuzhiyun */
1001*4882a593Smuzhiyunp_mesgout_from_host:
1002*4882a593Smuzhiyun	cmp	SINDEX, HOST_MSG	jne p_mesgout_onebyte;
1003*4882a593Smuzhiyun	jmp	host_message_loop;
1004*4882a593Smuzhiyun
1005*4882a593Smuzhiyunp_mesgout_onebyte:
1006*4882a593Smuzhiyun	mvi	CLRSINT1, CLRATNO;
1007*4882a593Smuzhiyun	mov	SCSIDAT, SINDEX;
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun/*
1010*4882a593Smuzhiyun * If the next bus phase after ATN drops is message out, it means
1011*4882a593Smuzhiyun * that the target is requesting that the last message(s) be resent.
1012*4882a593Smuzhiyun */
1013*4882a593Smuzhiyun	call	phase_lock;
1014*4882a593Smuzhiyun	cmp	LASTPHASE, P_MESGOUT	je p_mesgout_retry;
1015*4882a593Smuzhiyun
1016*4882a593Smuzhiyunp_mesgout_done:
1017*4882a593Smuzhiyun	mvi	CLRSINT1,CLRATNO;	/* Be sure to turn ATNO off */
1018*4882a593Smuzhiyun	mov	LAST_MSG, MSG_OUT;
1019*4882a593Smuzhiyun	mvi	MSG_OUT, MSG_NOOP;	/* No message left */
1020*4882a593Smuzhiyun	jmp	ITloop;
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun/*
1023*4882a593Smuzhiyun * Message in phase.  Bytes are read using Automatic PIO mode.
1024*4882a593Smuzhiyun */
1025*4882a593Smuzhiyunp_mesgin:
1026*4882a593Smuzhiyun	/* read the 1st message byte */
1027*4882a593Smuzhiyun	mvi	ACCUM		call inb_first;
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun	test	A,MSG_IDENTIFYFLAG	jnz mesgin_identify;
1030*4882a593Smuzhiyun	cmp	A,MSG_DISCONNECT	je mesgin_disconnect;
1031*4882a593Smuzhiyun	cmp	A,MSG_SAVEDATAPOINTER	je mesgin_sdptrs;
1032*4882a593Smuzhiyun	cmp	ALLZEROS,A		je mesgin_complete;
1033*4882a593Smuzhiyun	cmp	A,MSG_RESTOREPOINTERS	je mesgin_rdptrs;
1034*4882a593Smuzhiyun	cmp	A,MSG_IGN_WIDE_RESIDUE	je mesgin_ign_wide_residue;
1035*4882a593Smuzhiyun	cmp	A,MSG_NOOP		je mesgin_done;
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun/*
1038*4882a593Smuzhiyun * Pushed message loop to allow the kernel to
1039*4882a593Smuzhiyun * run it's own message state engine.  To avoid an
1040*4882a593Smuzhiyun * extra nop instruction after signaling the kernel,
1041*4882a593Smuzhiyun * we perform the phase_lock before checking to see
1042*4882a593Smuzhiyun * if we should exit the loop and skip the phase_lock
1043*4882a593Smuzhiyun * in the ITloop.  Performing back to back phase_locks
1044*4882a593Smuzhiyun * shouldn't hurt, but why do it twice...
1045*4882a593Smuzhiyun */
1046*4882a593Smuzhiyunhost_message_loop:
1047*4882a593Smuzhiyun	call	phase_lock;	/* Benign the first time through. */
1048*4882a593Smuzhiyun	SET_SEQINTCODE(HOST_MSG_LOOP)
1049*4882a593Smuzhiyun	cmp	RETURN_1, EXIT_MSG_LOOP	je ITloop;
1050*4882a593Smuzhiyun	cmp	RETURN_1, CONT_MSG_LOOP_WRITE	jne . + 3;
1051*4882a593Smuzhiyun	mov	SCSIDAT, RETURN_2;
1052*4882a593Smuzhiyun	jmp	host_message_loop;
1053*4882a593Smuzhiyun	/* Must be CONT_MSG_LOOP_READ */
1054*4882a593Smuzhiyun	mov	NONE, SCSIDAT;	/* ACK Byte */
1055*4882a593Smuzhiyun	jmp	host_message_loop;
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyunmesgin_ign_wide_residue:
1058*4882a593Smuzhiyun	mov	SAVED_MODE, MODE_PTR;
1059*4882a593Smuzhiyun	SET_MODE(M_SCSI, M_SCSI)
1060*4882a593Smuzhiyun	shr	NEGOADDR, 4, SAVED_SCSIID;
1061*4882a593Smuzhiyun	mov	A, NEGCONOPTS;
1062*4882a593Smuzhiyun	RESTORE_MODE(SAVED_MODE)
1063*4882a593Smuzhiyun	test	A, WIDEXFER jz mesgin_reject;
1064*4882a593Smuzhiyun	/* Pull the residue byte */
1065*4882a593Smuzhiyun	mvi	REG0	call inb_next;
1066*4882a593Smuzhiyun	cmp	REG0, 0x01 jne mesgin_reject;
1067*4882a593Smuzhiyun	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
1068*4882a593Smuzhiyun	test	SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done;
1069*4882a593Smuzhiyun	SET_SEQINTCODE(IGN_WIDE_RES)
1070*4882a593Smuzhiyun	jmp	mesgin_done;
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyunmesgin_proto_violation:
1073*4882a593Smuzhiyun	SET_SEQINTCODE(PROTO_VIOLATION)
1074*4882a593Smuzhiyun	jmp	mesgin_done;
1075*4882a593Smuzhiyunmesgin_reject:
1076*4882a593Smuzhiyun	mvi	MSG_MESSAGE_REJECT	call mk_mesg;
1077*4882a593Smuzhiyunmesgin_done:
1078*4882a593Smuzhiyun	mov	NONE,SCSIDAT;		/*dummy read from latch to ACK*/
1079*4882a593Smuzhiyun	jmp	ITloop;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun#define INDEX_DISC_LIST(scsiid, lun)					\
1082*4882a593Smuzhiyun	and	A, 0xC0, scsiid;					\
1083*4882a593Smuzhiyun	or	SCBPTR, A, lun;						\
1084*4882a593Smuzhiyun	clr	SCBPTR[1];						\
1085*4882a593Smuzhiyun	and	SINDEX, 0x30, scsiid;					\
1086*4882a593Smuzhiyun	shr	SINDEX, 3;	/* Multiply by 2 */			\
1087*4882a593Smuzhiyun	add	SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF);		\
1088*4882a593Smuzhiyun	mvi	SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF)
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyunmesgin_identify:
1091*4882a593Smuzhiyun	/*
1092*4882a593Smuzhiyun	 * Determine whether a target is using tagged or non-tagged
1093*4882a593Smuzhiyun	 * transactions by first looking at the transaction stored in
1094*4882a593Smuzhiyun	 * the per-device, disconnected array.  If there is no untagged
1095*4882a593Smuzhiyun	 * transaction for this target, this must be a tagged transaction.
1096*4882a593Smuzhiyun	 */
1097*4882a593Smuzhiyun	and	SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
1098*4882a593Smuzhiyun	INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
1099*4882a593Smuzhiyun	bmov	DINDEX, SINDEX, 2;
1100*4882a593Smuzhiyun	bmov	REG0, SINDIR, 2;
1101*4882a593Smuzhiyun	cmp	REG0[1], SCB_LIST_NULL je snoop_tag;
1102*4882a593Smuzhiyun	/* Untagged.  Clear the busy table entry and setup the SCB. */
1103*4882a593Smuzhiyun	bmov	DINDIR, ALLONES, 2;
1104*4882a593Smuzhiyun	bmov	SCBPTR, REG0, 2;
1105*4882a593Smuzhiyun	jmp	setup_SCB;
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun/*
1108*4882a593Smuzhiyun * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
1109*4882a593Smuzhiyun * If we get one, we use the tag returned to find the proper
1110*4882a593Smuzhiyun * SCB.  After receiving the tag, look for the SCB at SCB locations tag and
1111*4882a593Smuzhiyun * tag + 256.
1112*4882a593Smuzhiyun */
1113*4882a593Smuzhiyunsnoop_tag:
1114*4882a593Smuzhiyun	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
1115*4882a593Smuzhiyun		or	SEQ_FLAGS, 0x80;
1116*4882a593Smuzhiyun	}
1117*4882a593Smuzhiyun	mov	NONE, SCSIDAT;		/* ACK Identify MSG */
1118*4882a593Smuzhiyun	call	phase_lock;
1119*4882a593Smuzhiyun	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
1120*4882a593Smuzhiyun		or	SEQ_FLAGS, 0x1;
1121*4882a593Smuzhiyun	}
1122*4882a593Smuzhiyun	cmp	LASTPHASE, P_MESGIN	jne not_found_ITloop;
1123*4882a593Smuzhiyun	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
1124*4882a593Smuzhiyun		or	SEQ_FLAGS, 0x2;
1125*4882a593Smuzhiyun	}
1126*4882a593Smuzhiyun	cmp	SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found;
1127*4882a593Smuzhiyunget_tag:
1128*4882a593Smuzhiyun	clr	SCBPTR[1];
1129*4882a593Smuzhiyun	mvi	SCBPTR	call inb_next;	/* tag value */
1130*4882a593Smuzhiyunverify_scb:
1131*4882a593Smuzhiyun	test	SCB_CONTROL,DISCONNECTED jz verify_other_scb;
1132*4882a593Smuzhiyun	mov	A, SAVED_SCSIID;
1133*4882a593Smuzhiyun	cmp	SCB_SCSIID, A jne verify_other_scb;
1134*4882a593Smuzhiyun	mov	A, SAVED_LUN;
1135*4882a593Smuzhiyun	cmp	SCB_LUN, A je setup_SCB_disconnected;
1136*4882a593Smuzhiyunverify_other_scb:
1137*4882a593Smuzhiyun	xor	SCBPTR[1], 1;
1138*4882a593Smuzhiyun	test	SCBPTR[1], 0xFF jnz verify_scb;
1139*4882a593Smuzhiyun	jmp	not_found;
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun/*
1142*4882a593Smuzhiyun * Ensure that the SCB the tag points to is for
1143*4882a593Smuzhiyun * an SCB transaction to the reconnecting target.
1144*4882a593Smuzhiyun */
1145*4882a593Smuzhiyunsetup_SCB:
1146*4882a593Smuzhiyun	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
1147*4882a593Smuzhiyun		or	SEQ_FLAGS, 0x10;
1148*4882a593Smuzhiyun	}
1149*4882a593Smuzhiyun	test	SCB_CONTROL,DISCONNECTED jz not_found;
1150*4882a593Smuzhiyunsetup_SCB_disconnected:
1151*4882a593Smuzhiyun	and	SCB_CONTROL,~DISCONNECTED;
1152*4882a593Smuzhiyun	clr	SEQ_FLAGS;	/* make note of IDENTIFY */
1153*4882a593Smuzhiyun	test	SCB_SGPTR, SG_LIST_NULL jnz . + 3;
1154*4882a593Smuzhiyun	bmov	ALLOCFIFO_SCBPTR, SCBPTR, 2;
1155*4882a593Smuzhiyun	call	allocate_fifo;
1156*4882a593Smuzhiyun	/* See if the host wants to send a message upon reconnection */
1157*4882a593Smuzhiyun	test	SCB_CONTROL, MK_MESSAGE jz mesgin_done;
1158*4882a593Smuzhiyun	mvi	HOST_MSG	call mk_mesg;
1159*4882a593Smuzhiyun	jmp	mesgin_done;
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyunnot_found:
1162*4882a593Smuzhiyun	SET_SEQINTCODE(NO_MATCH)
1163*4882a593Smuzhiyun	jmp	mesgin_done;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyunnot_found_ITloop:
1166*4882a593Smuzhiyun	SET_SEQINTCODE(NO_MATCH)
1167*4882a593Smuzhiyun	jmp	ITloop;
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun/*
1170*4882a593Smuzhiyun * We received a "command complete" message.  Put the SCB on the complete
1171*4882a593Smuzhiyun * queue and trigger a completion interrupt via the idle loop.  Before doing
1172*4882a593Smuzhiyun * so, check to see if there is a residual or the status byte is something
1173*4882a593Smuzhiyun * other than STATUS_GOOD (0).  In either of these conditions, we upload the
1174*4882a593Smuzhiyun * SCB back to the host so it can process this information.
1175*4882a593Smuzhiyun */
1176*4882a593Smuzhiyunmesgin_complete:
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun	/*
1179*4882a593Smuzhiyun	 * If ATN is raised, we still want to give the target a message.
1180*4882a593Smuzhiyun	 * Perhaps there was a parity error on this last message byte.
1181*4882a593Smuzhiyun	 * Either way, the target should take us to message out phase
1182*4882a593Smuzhiyun	 * and then attempt to complete the command again.  We should use a
1183*4882a593Smuzhiyun	 * critical section here to guard against a timeout triggering
1184*4882a593Smuzhiyun	 * for this command and setting ATN while we are still processing
1185*4882a593Smuzhiyun	 * the completion.
1186*4882a593Smuzhiyun	test	SCSISIGI, ATNI jnz mesgin_done;
1187*4882a593Smuzhiyun	 */
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun	/*
1190*4882a593Smuzhiyun	 * If we are identified and have successfully sent the CDB,
1191*4882a593Smuzhiyun	 * any status will do.  Optimize this fast path.
1192*4882a593Smuzhiyun	 */
1193*4882a593Smuzhiyun	test	SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
1194*4882a593Smuzhiyun	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted;
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun	/*
1197*4882a593Smuzhiyun	 * If the target never sent an identify message but instead went
1198*4882a593Smuzhiyun	 * to mesgin to give an invalid message, let the host abort us.
1199*4882a593Smuzhiyun	 */
1200*4882a593Smuzhiyun	test	SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun	/*
1203*4882a593Smuzhiyun	 * If we recevied good status but never successfully sent the
1204*4882a593Smuzhiyun	 * cdb, abort the command.
1205*4882a593Smuzhiyun	 */
1206*4882a593Smuzhiyun	test	SCB_SCSI_STATUS,0xff	jnz complete_accepted;
1207*4882a593Smuzhiyun	test	SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
1208*4882a593Smuzhiyuncomplete_accepted:
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun	/*
1211*4882a593Smuzhiyun	 * See if we attempted to deliver a message but the target ingnored us.
1212*4882a593Smuzhiyun	 */
1213*4882a593Smuzhiyun	test	SCB_CONTROL, MK_MESSAGE jz complete_nomsg;
1214*4882a593Smuzhiyun	SET_SEQINTCODE(MKMSG_FAILED)
1215*4882a593Smuzhiyuncomplete_nomsg:
1216*4882a593Smuzhiyun	call	queue_scb_completion;
1217*4882a593Smuzhiyun	jmp	await_busfree;
1218*4882a593Smuzhiyun
1219*4882a593SmuzhiyunBEGIN_CRITICAL;
1220*4882a593Smuzhiyunfreeze_queue:
1221*4882a593Smuzhiyun	/* Cancel any pending select-out. */
1222*4882a593Smuzhiyun	test	SSTAT0, SELDO|SELINGO jnz . + 2;
1223*4882a593Smuzhiyun	and	SCSISEQ0, ~ENSELO;
1224*4882a593Smuzhiyun	mov	ACCUM_SAVE, A;
1225*4882a593Smuzhiyun	clr	A;
1226*4882a593Smuzhiyun	add	QFREEZE_COUNT, 1;
1227*4882a593Smuzhiyun	adc	QFREEZE_COUNT[1], A;
1228*4882a593Smuzhiyun	or	SEQ_FLAGS2, SELECTOUT_QFROZEN;
1229*4882a593Smuzhiyun	mov	A, ACCUM_SAVE ret;
1230*4882a593SmuzhiyunEND_CRITICAL;
1231*4882a593Smuzhiyun
/*
 * Complete the current FIFO's SCB if data for this same
 * SCB is not transferring in the other FIFO.
 */
SET_SRC_MODE	M_DFF1;
SET_DST_MODE	M_DFF1;
pkt_complete_scb_if_fifos_idle:
	/* Remember this SCB so we can re-select it after the mode switch. */
	bmov	ARG_1, SCBPTR, 2;
	mvi	DFFSXFRCTL, CLRCHN;
	SET_MODE(M_SCSI, M_SCSI)
	bmov	SCBPTR, ARG_1, 2;
	/* Still referenced by a FIFO?  Let that FIFO finish it later. */
	test	SCB_FIFO_USE_COUNT, 0xFF jnz return;
queue_scb_completion:
	test	SCB_SCSI_STATUS,0xff	jnz bad_status;
	/*
	 * Check for residuals
	 */
	test	SCB_SGPTR, SG_LIST_NULL jnz complete;	/* No xfer */
	test	SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
	test	SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
complete:
	/* Good status and no residual: push onto the completed-SCB list. */
BEGIN_CRITICAL;
	bmov	SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
	bmov	COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
END_CRITICAL;
bad_status:
	/*
	 * STATUS_PKT_SENSE means sense was delivered in packetized
	 * mode; skip the queue freeze and just upload the SCB.
	 */
	cmp	SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb;
	call	freeze_queue;
upload_scb:
	/*
	 * Restore SCB TAG since we reuse this field
	 * in the sequencer.  We don't want to corrupt
	 * it on the host.
	 */
	bmov	SCB_TAG, SCBPTR, 2;
BEGIN_CRITICAL;
	or	SCB_SGPTR, SG_STATUS_VALID;
	mvi	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL;
	/* Tail-insert onto the list of SCBs awaiting DMA to the host. */
	cmp	COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne add_dma_scb_tail;
	bmov	COMPLETE_DMA_SCB_HEAD, SCBPTR, 2;
	bmov	COMPLETE_DMA_SCB_TAIL, SCBPTR, 2 ret;
add_dma_scb_tail:
	bmov	REG0, SCBPTR, 2;
	bmov	SCBPTR, COMPLETE_DMA_SCB_TAIL, 2;
	bmov	SCB_NEXT_COMPLETE, REG0, 2;
	bmov	COMPLETE_DMA_SCB_TAIL, REG0, 2 ret;
END_CRITICAL;
1279*4882a593Smuzhiyun
/*
 * Is it a disconnect message?  Set a flag in the SCB to remind us
 * and await the bus going free.  If this is an untagged transaction
 * store the SCB id for it in our untagged target table for lookup on
 * a reselection.
 */
mesgin_disconnect:
	/*
	 * If ATN is raised, we still want to give the target a message.
	 * Perhaps there was a parity error on this last message byte
	 * or we want to abort this command.  Either way, the target
	 * should take us to message out phase and then attempt to
	 * disconnect again.
	 * XXX - Wait for more testing.
	test	SCSISIGI, ATNI jnz mesgin_done;
	 */
	/* A disconnect before identify/CDB delivery is a protocol error. */
	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
		jnz mesgin_proto_violation;
	or	SCB_CONTROL,DISCONNECTED;
	/* Tagged commands are reselected by tag; no table entry needed. */
	test	SCB_CONTROL, TAG_ENB jnz await_busfree;
queue_disc_scb:
	/* Untagged: record this SCB in the per-target/lun disconnect list. */
	bmov	REG0, SCBPTR, 2;
	INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
	bmov	DINDEX, SINDEX, 2;
	bmov	DINDIR, REG0, 2;
	bmov	SCBPTR, REG0, 2;
	/* FALLTHROUGH */
await_busfree:
	/* Expected busfree; don't let it raise an interrupt. */
	and	SIMODE1, ~ENBUSFREE;
	if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) {
		/*
		 * In the BUSFREEREV_BUG case, the
		 * busfree status was cleared at the
		 * beginning of the connection.
		 */
		mvi	CLRSINT1,CLRBUSFREE;
	}
	mov	NONE, SCSIDAT;		/* Ack the last byte */
	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
		jnz await_busfree_not_m_dff;
SET_SRC_MODE	M_DFF1;
SET_DST_MODE	M_DFF1;
await_busfree_clrchn:
	mvi	DFFSXFRCTL, CLRCHN;
await_busfree_not_m_dff:
	/* clear target specific flags */
	mvi	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
	/* Spin until a new phase or a real busfree appears. */
	test	SSTAT1,REQINIT|BUSFREE	jz .;
	/*
	 * We only set BUSFREE status once either a new
	 * phase has been detected or we are really
	 * BUSFREE.  This allows the driver to know
	 * that we are active on the bus even though
	 * no identified transaction exists should a
	 * timeout occur while awaiting busfree.
	 */
	mvi	LASTPHASE, P_BUSFREE;
	test	SSTAT1, BUSFREE jnz idle_loop;
	SET_SEQINTCODE(MISSED_BUSFREE)
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun
/*
 * Save data pointers message:
 * Copying RAM values back to SCB, for Save Data Pointers message, but
 * only if we've actually been into a data phase to change them.  This
 * protects against bogus data in scratch ram and the residual counts
 * since they are only initialized when we go into data_in or data_out.
 * Ack the message as soon as possible.
 */
SET_SRC_MODE	M_DFF1;
SET_DST_MODE	M_DFF1;
mesgin_sdptrs:
	mov	NONE,SCSIDAT;		/*dummy read from latch to ACK*/
	/* No data phase seen yet on this connection; nothing to save. */
	test	SEQ_FLAGS, DPHASE	jz ITloop;
	call	save_pointers;
	jmp	ITloop;
1356*4882a593Smuzhiyun
/*
 * Copy the current transfer position from scratch/DMA state back into
 * the SCB so the command can be resumed correctly after a disconnect.
 */
save_pointers:
	/*
	 * If we are asked to save our position at the end of the
	 * transfer, just mark us at the end rather than perform a
	 * full save.
	 */
	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full;
	or	SCB_SGPTR, SG_LIST_NULL ret;

save_pointers_full:
	/*
	 * The SCB_DATAPTR becomes the current SHADDR.
	 * All other information comes directly from our residual
	 * state.
	 */
	bmov	SCB_DATAPTR, SHADDR, 8;
	bmov	SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret;
1374*4882a593Smuzhiyun
/*
 * Restore pointers message?  Data pointers are recopied from the
 * SCB anytime we enter a data phase for the first time, so all
 * we need to do is clear the DPHASE flag and let the data phase
 * code do the rest.  We also reset/reallocate the FIFO to make
 * sure we have a clean start for the next data or command phase.
 */
mesgin_rdptrs:
	and	SEQ_FLAGS, ~DPHASE;
	/* If we're not in a DFF mode, the FIFO was already released. */
	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo;
	mvi	DFFSXFRCTL, RSTCHN|CLRSHCNT;
	SET_MODE(M_SCSI, M_SCSI)
msgin_rdptrs_get_fifo:
	call	allocate_fifo;
	jmp	mesgin_done;
1390*4882a593Smuzhiyun
/*
 * Wait for the target to drive a stable bus phase (retrying while a
 * parity error is pending), then latch the phase into LASTPHASE.
 */
phase_lock:
	if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) {
		/*
		 * Don't ignore persistent REQ assertions just because
		 * they were asserted within the bus settle delay window.
		 * This allows us to tolerate devices like the GEM318
		 * that violate the SCSI spec.  We are careful not to
		 * count REQ while we are waiting for it to fall during
		 * an async phase due to our asserted ACK.  Each
		 * sequencer instruction takes ~25ns, so the REQ must
		 * last at least 100ns in order to be counted as a true
		 * REQ.
		 */
		test	SCSIPHASE, 0xFF jnz phase_locked;
		test	SCSISIGI, ACKI jnz phase_lock;
		test	SCSISIGI, REQI jz phase_lock;
		test	SCSIPHASE, 0xFF jnz phase_locked;
		test	SCSISIGI, ACKI jnz phase_lock;
		test	SCSISIGI, REQI jz phase_lock;
phase_locked:
	} else {
		test	SCSIPHASE, 0xFF jz .;
	}
	/* Restart the wait if a parity error is flagged for this byte. */
	test	SSTAT1, SCSIPERR jnz phase_lock;
phase_lock_latch_phase:
	and	LASTPHASE, PHASE_MASK, SCSISIGI ret;
1417*4882a593Smuzhiyun
/*
 * Functions to read data in Automatic PIO mode.
 *
 * An ACK is not sent on input from the target until SCSIDATL is read from.
 * So we wait until SCSIDATL is latched (the usual way), then read the data
 * byte directly off the bus using SCSIBUSL.  When we have pulled the ATN
 * line, or we just want to acknowledge the byte, then we do a dummy read
 * from SCSIDATL.  The SCSI spec guarantees that the target will hold the
 * data byte on the bus until we send our ACK.
 *
 * The assumption here is that these are called in a particular sequence,
 * and that REQ is already set when inb_first is called.  inb_{first,next}
 * use the same calling convention as inb.
 */
inb_next:
	mov	NONE,SCSIDAT;		/*dummy read from latch to ACK*/
inb_next_wait:
	/*
	 * If there is a parity error, wait for the kernel to
	 * see the interrupt and prepare our message response
	 * before continuing.
	 */
	test	SCSIPHASE, 0xFF jz .;
	test	SSTAT1, SCSIPERR jnz inb_next_wait;
inb_next_check_phase:
	/* The target must still be in message-in phase. */
	and	LASTPHASE, PHASE_MASK, SCSISIGI;
	cmp	LASTPHASE, P_MESGIN jne mesgin_phasemis;
inb_first:
	/* Store the byte at the address passed in SINDEX. */
	clr	DINDEX[1];
	mov	DINDEX,SINDEX;
	mov	DINDIR,SCSIBUS	ret;		/*read byte directly from bus*/
inb_last:
	mov	NONE,SCSIDAT ret;		/*dummy read from latch to ACK*/
1451*4882a593Smuzhiyun
/*
 * Assert ATN and record the message byte (passed in SINDEX) in MSG_OUT
 * so it is delivered during the next message-out phase.
 */
mk_mesg:
	mvi	SCSISIGO, ATNO;
	mov	MSG_OUT,SINDEX ret;
1455*4882a593Smuzhiyun
SET_SRC_MODE	M_DFF1;
SET_DST_MODE	M_DFF1;
/*
 * Cancel any in-progress S/G element prefetch and clear all S/G
 * engine state for this FIFO.
 */
disable_ccsgen:
	test	SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done;
	clr	CCSGCTL;
disable_ccsgen_fetch_done:
	clr	SG_STATE ret;
1463*4882a593Smuzhiyun
/*
 * Keep the data FIFO fed with S/G elements: prefetch a cacheline of
 * S/G entries into the CCSGRAM cache when needed, and preload the
 * next element into the DMA engine when the hardware has room.
 * Called from the idle loop and from p_data_handle_xfer.
 */
service_fifo:
	/*
	 * Do we have any prefetch left???
	 */
	test	SG_STATE, SEGS_AVAIL jnz idle_sg_avail;

	/*
	 * Can this FIFO have access to the S/G cache yet?
	 */
	test	CCSGCTL, SG_CACHE_AVAIL jz return;

	/* Did we just finish fetching segs? */
	test	CCSGCTL, CCSGDONE jnz idle_sgfetch_complete;

	/* Are we actively fetching segments? */
	test	CCSGCTL, CCSGENACK jnz return;

	/*
	 * Should the other FIFO get the S/G cache first?  If
	 * both FIFOs have been allocated since we last checked
	 * any FIFO, it is important that we service a FIFO
	 * that is not actively on the bus first.  This guarantees
	 * that a FIFO will be freed to handle snapshot requests for
	 * any FIFO that is still on the bus.  Chips with RTI do not
	 * perform snapshots, so don't bother with this test there.
	 */
	if ((ahd->features & AHD_RTI) == 0) {
		/*
		 * If we're not still receiving SCSI data,
		 * it is safe to allocate the S/G cache to
		 * this FIFO.
		 */
		test	DFCNTRL, SCSIEN jz idle_sgfetch_start;

		/*
		 * Switch to the other FIFO.  Non-RTI chips
		 * also have the "set mode" bug, so we must
		 * disable interrupts during the switch.
		 */
		mvi	SEQINTCTL, INTVEC1DSL;
		xor	MODE_PTR, MK_MODE(M_DFF1, M_DFF1);

		/*
		 * If the other FIFO needs loading, then it
		 * must not have claimed the S/G cache yet
		 * (SG_CACHE_AVAIL would have been cleared in
		 * the original FIFO mode and we test this above).
		 * Return to the idle loop so we can process the
		 * FIFO not currently on the bus first.
		 */
		test	SG_STATE, LOADING_NEEDED jz idle_sgfetch_okay;
		clr	SEQINTCTL ret;
idle_sgfetch_okay:
		/* Other FIFO doesn't need the cache; switch back. */
		xor	MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
		clr	SEQINTCTL;
	}

idle_sgfetch_start:
	/*
	 * We fetch a "cacheline aligned" and sized amount of data
	 * so we don't end up referencing a non-existent page.
	 * Cacheline aligned is in quotes because the kernel will
	 * set the prefetch amount to a reasonable level if the
	 * cacheline size is unknown.
	 */
	bmov	SGHADDR, SCB_RESIDUAL_SGPTR, 4;
	mvi	SGHCNT, SG_PREFETCH_CNT;
	if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) {
		/*
		 * Need two instructions between "touches" of SGHADDR.
		 */
		nop;
	}
	/* Round the fetch address down to the prefetch alignment. */
	and	SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
	mvi	CCSGCTL, CCSGEN|CCSGRESET;
	or	SG_STATE, FETCH_INPROG ret;
idle_sgfetch_complete:
	/*
	 * Guard against SG_CACHE_AVAIL activating during sg fetch
	 * request in the other FIFO.
	 */
	test	SG_STATE, FETCH_INPROG jz return;
	clr	CCSGCTL;
	/* Index into the cache at the offset of our first element. */
	and	CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
	mvi	SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
idle_sg_avail:
	/* Does the hardware have space for another SG entry? */
	test	DFSTATUS, PRELOAD_AVAIL jz return;
	/*
	 * On the A, preloading a segment before HDMAENACK
	 * comes true can clobber the shadow address of the
	 * first segment in the S/G FIFO.  Wait until it is
	 * safe to proceed.
	 */
	if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) {
		test	DFCNTRL, HDMAENACK jz return;
	}
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		bmov	HADDR, CCSGRAM, 8;
	} else {
		bmov 	HADDR, CCSGRAM, 4;
	}
	bmov	HCNT, CCSGRAM, 3;
	/* Last byte of the element holds the S/G flags (e.g. LAST_SEG). */
	bmov	SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
	if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
		and	HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3];
	}
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		/* Skip 4 bytes of pad. */
		add	CCSGADDR, 4;
	}
sg_advance:
	clr	A;			/* add sizeof(struct scatter) */
	add	SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
	adc	SCB_RESIDUAL_SGPTR[1],A;
	adc	SCB_RESIDUAL_SGPTR[2],A;
	adc	SCB_RESIDUAL_SGPTR[3],A;
	mov	SINDEX, SCB_RESIDUAL_SGPTR[0];
	test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3;
	or	SINDEX, LAST_SEG;
	clr	SG_STATE;
	mov	SG_CACHE_PRE, SINDEX;
	if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
		/*
		 * Use SCSIENWRDIS so that SCSIEN is never
		 * modified by this operation.
		 */
		or	DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS;
	} else {
		or	DFCNTRL, PRELOADEN|HDMAEN;
	}
	/*
	 * Do we have another segment in the cache?
	 */
	add	NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR;
	jnc	return;
	and	SG_STATE, ~SEGS_AVAIL ret;
1601*4882a593Smuzhiyun
/*
 * Initialize the DMA address and counter from the SCB.
 */
load_first_seg:
	/* HADDR/HCNT are loaded from the SCB's data pointer block. */
	bmov	HADDR, SCB_DATAPTR, 11;
	and	REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0];
	test	SCB_DATACNT[3], SG_LAST_SEG jz . + 2;
	or	REG_ISR, LAST_SEG;
	mov	SG_CACHE_PRE, REG_ISR;
	mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
	/*
	 * Since we are entering a data phase, we will
	 * rely on the SCB_RESID* fields.  Initialize the
	 * residual and clear the full residual flag.
	 */
	and	SCB_SGPTR[0], ~SG_FULL_RESID;
	bmov	SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
	/* If we need more S/G elements, tell the idle loop */
	test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2;
	mvi	SG_STATE, LOADING_NEEDED ret;
	clr	SG_STATE ret;
1623*4882a593Smuzhiyun
/*
 * Service outstanding S/G loads for the active data phase.  setjmp
 * presumably records a resume point (via LONGJMP_ADDR) so the idle
 * loop can return here; once no more loading is needed, the handler
 * address is invalidated — TODO confirm against the setjmp helper.
 */
p_data_handle_xfer:
	call	setjmp;
	test	SG_STATE, LOADING_NEEDED jnz service_fifo;
p_data_clear_handler:
	or	LONGJMP_ADDR[1], INVALID_ADDR ret;
1629*4882a593Smuzhiyun
/*
 * Data phase handler.  Sets up (or re-initializes) the DMA engine for
 * the transfer, loops servicing the FIFOs until the target changes
 * phase or the last segment completes, then computes the residual.
 */
p_data:
	/* Data phase before identify/CDB delivery is a protocol error. */
	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT	jz p_data_allowed;
	SET_SEQINTCODE(PROTO_VIOLATION)
p_data_allowed:

	test	SEQ_FLAGS, DPHASE	jz data_phase_initialize;

	/*
	 * If we re-enter the data phase after going through another
	 * phase, our transfer location has almost certainly been
	 * corrupted by the intervening, non-data, transfers.  Ask
	 * the host driver to fix us up based on the transfer residual
	 * unless we already know that we should be bitbucketing.
	 */
	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
	SET_SEQINTCODE(PDATA_REINIT)
	jmp	data_phase_inbounds;

p_data_bitbucket:
	/*
	 * Turn on `Bit Bucket' mode, wait until the target takes
	 * us to another phase, and then notify the host.
	 */
	mov	SAVED_MODE, MODE_PTR;
	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
		jnz bitbucket_not_m_dff;
	/*
	 * Ensure that any FIFO contents are cleared out and the
	 * FIFO free'd prior to starting the BITBUCKET.  BITBUCKET
	 * doesn't discard data already in the FIFO.
	 */
	mvi	DFFSXFRCTL, RSTCHN|CLRSHCNT;
	SET_MODE(M_SCSI, M_SCSI)
bitbucket_not_m_dff:
	or	SXFRCTL1,BITBUCKET;
	/* Wait for non-data phase. */
	test	SCSIPHASE, ~DATA_PHASE_MASK jz .;
	and	SXFRCTL1, ~BITBUCKET;
	RESTORE_MODE(SAVED_MODE)
SET_SRC_MODE	M_DFF1;
SET_DST_MODE	M_DFF1;
	SET_SEQINTCODE(DATA_OVERRUN)
	jmp	ITloop;

data_phase_initialize:
	/* A NULL S/G list means the command expected no data at all. */
	test	SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
	call	load_first_seg;
data_phase_inbounds:
	/* We have seen a data phase at least once. */
	or	SEQ_FLAGS, DPHASE;
	mov	SAVED_MODE, MODE_PTR;
	test	SG_STATE, LOADING_NEEDED jz data_group_dma_loop;
	call	p_data_handle_xfer;
data_group_dma_loop:
	/*
	 * The transfer is complete if either the last segment
	 * completes or the target changes phase.  Both conditions
	 * will clear SCSIEN.
	 */
	call	idle_loop_service_fifos;
	call	idle_loop_cchan;
	call	idle_loop_gsfifo;
	RESTORE_MODE(SAVED_MODE)
	test	DFCNTRL, SCSIEN jnz data_group_dma_loop;

data_group_dmafinish:
	/*
	 * The transfer has terminated either due to a phase
	 * change, and/or the completion of the last segment.
	 * We have two goals here.  Do as much other work
	 * as possible while the data fifo drains on a read
	 * and respond as quickly as possible to the standard
	 * messages (save data pointers/disconnect and command
	 * complete) that usually follow a data phase.
	 */
	call	calc_residual;

	/*
	 * Go ahead and shut down the DMA engine now.
	 */
	test	DFCNTRL, DIRECTION jnz data_phase_finish;
data_group_fifoflush:
	if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
		or	DFCNTRL, FIFOFLUSH;
	}
	/*
	 * We have enabled the auto-ack feature.  This means
	 * that the controller may have already transferred
	 * some overrun bytes into the data FIFO and acked them
	 * on the bus.  The only way to detect this situation is
	 * to wait for LAST_SEG_DONE to come true on a completed
	 * transfer and then test to see if the data FIFO is
	 * non-empty.  We know there is more data yet to transfer
	 * if SG_LIST_NULL is not yet set, thus there cannot be
	 * an overrun.
	 */
	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish;
	test	SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
	test	DFSTATUS, FIFOEMP jnz data_phase_finish;
	/* Overrun */
	jmp	p_data;
data_phase_finish:
	/*
	 * If the target has left us in data phase, loop through
	 * the dma code again.  We will only loop if there is a
	 * data overrun.
	 */
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		test	SSTAT0, TARGET jnz data_phase_done;
	}
	if ((ahd->flags & AHD_INITIATORROLE) != 0) {
		test	SSTAT1, REQINIT jz .;
		test	SCSIPHASE, DATA_PHASE_MASK jnz p_data;
	}

data_phase_done:
	/* Kill off any pending prefetch */
	call	disable_ccsgen;
	or 	LONGJMP_ADDR[1], INVALID_ADDR;

	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		test	SEQ_FLAGS, DPHASE_PENDING jz ITloop;
		/*
		and	SEQ_FLAGS, ~DPHASE_PENDING;
		 * For data-in phases, wait for any pending acks from the
		 * initiator before changing phase.  We only need to
		 * send Ignore Wide Residue messages for data-in phases.
		test	DFCNTRL, DIRECTION jz target_ITloop;
		test	SSTAT1, REQINIT	jnz .;
		test	SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop;
		SET_MODE(M_SCSI, M_SCSI)
		test	NEGCONOPTS, WIDEXFER jz target_ITloop;
		 */
		/*
		 * Issue an Ignore Wide Residue Message.
		mvi	P_MESGIN|BSYO call change_phase;
		mvi	MSG_IGN_WIDE_RESIDUE call target_outb;
		mvi	1 call target_outb;
		jmp	target_ITloop;
		 */
	} else {
		jmp	ITloop;
	}
1773*4882a593Smuzhiyun
/*
 * We assume that, even though data may still be
 * transferring to the host, that the SCSI side of
 * the DMA engine is now in a static state.  This
 * allows us to update our notion of where we are
 * in this transfer.
 *
 * If, by chance, we stopped before being able
 * to fetch additional segments for this transfer,
 * yet the last S/G was completely exhausted,
 * call our idle loop until it is able to load
 * another segment.  This will allow us to immediately
 * pickup on the next segment on the next data phase.
 *
 * If we happened to stop on the last segment, then
 * our residual information is still correct from
 * the idle loop and there is no need to perform
 * any fixups.
 */
residual_before_last_seg:
	test    MDFFSTAT, SHVALID	jnz sgptr_fixup;
	/*
	 * Can never happen from an interrupt as the packetized
	 * hardware will only interrupt us once SHVALID or
	 * LAST_SEG_DONE.
	 */
	call	idle_loop_service_fifos;
	RESTORE_MODE(SAVED_MODE)
	/* FALLTHROUGH */
calc_residual:
	test	SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg;
	/* Record if we've consumed all S/G entries */
	test	MDFFSTAT, SHVALID	jz . + 2;
	bmov	SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
	or	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret;
1809*4882a593Smuzhiyun
sgptr_fixup:
	/*
	 * Fixup the residual next S/G pointer.  The S/G preload
	 * feature of the chip allows us to load two elements
	 * in addition to the currently active element.  We
	 * store the bottom byte of the next S/G pointer in
	 * the SG_CACHE_PTR register so we can restore the
	 * correct value when the DMA completes.  If the next
	 * sg ptr value has advanced to the point where higher
	 * bytes in the address have been affected, fix them
	 * too.
	 */
	test	SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
	test	SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
	/* Low byte wrapped while preloading; borrow from upper bytes. */
	add	SCB_RESIDUAL_SGPTR[1], -1;
	adc	SCB_RESIDUAL_SGPTR[2], -1;
	adc	SCB_RESIDUAL_SGPTR[3], -1;
sgptr_fixup_done:
	and	SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
	clr	SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */
	bmov	SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
1831*4882a593Smuzhiyun
1832*4882a593Smuzhiyunexport timer_isr:
1833*4882a593Smuzhiyun	call	issue_cmdcmplt;
1834*4882a593Smuzhiyun	mvi	CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
1835*4882a593Smuzhiyun	if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
1836*4882a593Smuzhiyun		/*
1837*4882a593Smuzhiyun		 * In H2A4, the mode pointer is not saved
1838*4882a593Smuzhiyun		 * for intvec2, but is restored on iret.
1839*4882a593Smuzhiyun		 * This can lead to the restoration of a
1840*4882a593Smuzhiyun		 * bogus mode ptr.  Manually clear the
1841*4882a593Smuzhiyun		 * intmask bits and do a normal return
1842*4882a593Smuzhiyun		 * to compensate.
1843*4882a593Smuzhiyun		 */
1844*4882a593Smuzhiyun		and	SEQINTCTL, ~(INTMASK2|INTMASK1) ret;
1845*4882a593Smuzhiyun	} else {
1846*4882a593Smuzhiyun		or	SEQINTCTL, IRET ret;
1847*4882a593Smuzhiyun	}
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyunexport seq_isr:
1850*4882a593Smuzhiyun	if ((ahd->features & AHD_RTI) == 0) {
1851*4882a593Smuzhiyun		/*
1852*4882a593Smuzhiyun		 * On RevA Silicon, if the target returns us to data-out
1853*4882a593Smuzhiyun		 * after we have already trained for data-out, it is
1854*4882a593Smuzhiyun		 * possible for us to transition the free running clock to
1855*4882a593Smuzhiyun		 * data-valid before the required 100ns P1 setup time (8 P1
1856*4882a593Smuzhiyun		 * assertions in fast-160 mode).  This will only happen if
1857*4882a593Smuzhiyun		 * this L-Q is a continuation of a data transfer for which
1858*4882a593Smuzhiyun		 * we have already prefetched data into our FIFO (LQ/Data
1859*4882a593Smuzhiyun		 * followed by LQ/Data for the same write transaction).
1860*4882a593Smuzhiyun		 * This can cause some target implementations to miss the
1861*4882a593Smuzhiyun		 * first few data transfers on the bus.  We detect this
1862*4882a593Smuzhiyun		 * situation by noticing that this is the first data transfer
1863*4882a593Smuzhiyun		 * after an LQ (LQIWORKONLQ true), that the data transfer is
1864*4882a593Smuzhiyun		 * a continuation of a transfer already setup in our FIFO
1865*4882a593Smuzhiyun		 * (SAVEPTRS interrupt), and that the transaction is a write
1866*4882a593Smuzhiyun		 * (DIRECTION set in DFCNTRL). The delay is performed by
1867*4882a593Smuzhiyun		 * disabling SCSIEN until we see the first REQ from the
1868*4882a593Smuzhiyun		 * target.
1869*4882a593Smuzhiyun		 *
1870*4882a593Smuzhiyun		 * First instruction in an ISR cannot be a branch on
1871*4882a593Smuzhiyun		 * Rev A.  Snapshot LQISTAT2 so the status is not missed
1872*4882a593Smuzhiyun		 * and deffer the test by one instruction.
1873*4882a593Smuzhiyun		 */
1874*4882a593Smuzhiyun		mov	REG_ISR, LQISTAT2;
1875*4882a593Smuzhiyun		test	REG_ISR, LQIWORKONLQ jz main_isr;
1876*4882a593Smuzhiyun		test	SEQINTSRC, SAVEPTRS  jz main_isr;
1877*4882a593Smuzhiyun		test	LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo;
1878*4882a593Smuzhiyun		/*
1879*4882a593Smuzhiyun		 * Switch to the active FIFO after clearing the snapshot
1880*4882a593Smuzhiyun		 * savepointer in the current FIFO.  We do this so that
1881*4882a593Smuzhiyun		 * a pending CTXTDONE or SAVEPTR is visible in the active
1882*4882a593Smuzhiyun		 * FIFO.  This status is the only way we can detect if we
1883*4882a593Smuzhiyun		 * have lost the race (e.g. host paused us) and our attempts
1884*4882a593Smuzhiyun		 * to disable the channel occurred after all REQs were
1885*4882a593Smuzhiyun		 * already seen and acked (REQINIT never comes true).
1886*4882a593Smuzhiyun		 */
1887*4882a593Smuzhiyun		mvi	DFFSXFRCTL, CLRCHN;
1888*4882a593Smuzhiyun		xor	MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
1889*4882a593Smuzhiyun		test	DFCNTRL, DIRECTION jz interrupt_return;
1890*4882a593Smuzhiyun		and	DFCNTRL, ~SCSIEN;
1891*4882a593Smuzhiyunsnapshot_wait_data_valid:
1892*4882a593Smuzhiyun		test	SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz interrupt_return;
1893*4882a593Smuzhiyun		test	SSTAT1, REQINIT	jz snapshot_wait_data_valid;
1894*4882a593Smuzhiyunsnapshot_data_valid:
1895*4882a593Smuzhiyun		or	DFCNTRL, SCSIEN;
1896*4882a593Smuzhiyun		or	SEQINTCTL, IRET ret;
1897*4882a593Smuzhiyunsnapshot_saveptr:
1898*4882a593Smuzhiyun		mvi	DFFSXFRCTL, CLRCHN;
1899*4882a593Smuzhiyun		or	SEQINTCTL, IRET ret;
1900*4882a593Smuzhiyunmain_isr:
1901*4882a593Smuzhiyun	}
1902*4882a593Smuzhiyun	test	SEQINTSRC, CFG4DATA	jnz cfg4data_intr;
1903*4882a593Smuzhiyun	test	SEQINTSRC, CFG4ISTAT	jnz cfg4istat_intr;
1904*4882a593Smuzhiyun	test	SEQINTSRC, SAVEPTRS	jnz saveptr_intr;
1905*4882a593Smuzhiyun	test	SEQINTSRC, CFG4ICMD	jnz cfg4icmd_intr;
1906*4882a593Smuzhiyun	SET_SEQINTCODE(INVALID_SEQINT)
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun/*
1909*4882a593Smuzhiyun * There are two types of save pointers interrupts:
1910*4882a593Smuzhiyun * The first is a snapshot save pointers where the current FIFO is not
1911*4882a593Smuzhiyun * active and contains a snapshot of the current poniter information.
1912*4882a593Smuzhiyun * This happens between packets in a stream for a single L_Q.  Since we
1913*4882a593Smuzhiyun * are not performing a pointer save, we can safely clear the channel
1914*4882a593Smuzhiyun * so it can be used for other transactions.  On RTI capable controllers,
1915*4882a593Smuzhiyun * where snapshots can, and are, disabled, the code to handle this type
1916*4882a593Smuzhiyun * of snapshot is not active.
1917*4882a593Smuzhiyun *
1918*4882a593Smuzhiyun * The second case is a save pointers on an active FIFO which occurs
1919*4882a593Smuzhiyun * if the target changes to a new L_Q or busfrees/QASes and the transfer
1920*4882a593Smuzhiyun * has a residual.  This should occur coincident with a ctxtdone.  We
1921*4882a593Smuzhiyun * disable the interrupt and allow our active routine to handle the
1922*4882a593Smuzhiyun * save.
1923*4882a593Smuzhiyun */
1924*4882a593Smuzhiyunsaveptr_intr:
1925*4882a593Smuzhiyun	if ((ahd->features & AHD_RTI) == 0) {
1926*4882a593Smuzhiyun		test	LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr;
1927*4882a593Smuzhiyun	}
1928*4882a593Smuzhiyunsaveptr_active_fifo:
1929*4882a593Smuzhiyun	and	SEQIMODE, ~ENSAVEPTRS;
1930*4882a593Smuzhiyun	or	SEQINTCTL, IRET ret;
1931*4882a593Smuzhiyun
1932*4882a593Smuzhiyuncfg4data_intr:
1933*4882a593Smuzhiyun	test	SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count;
1934*4882a593Smuzhiyun	call	load_first_seg;
1935*4882a593Smuzhiyun	call	pkt_handle_xfer;
1936*4882a593Smuzhiyun	inc	SCB_FIFO_USE_COUNT;
1937*4882a593Smuzhiyuninterrupt_return:
1938*4882a593Smuzhiyun	or	SEQINTCTL, IRET ret;
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyuncfg4istat_intr:
1941*4882a593Smuzhiyun	call	freeze_queue;
1942*4882a593Smuzhiyun	add	NONE, -13, SCB_CDB_LEN;
1943*4882a593Smuzhiyun	jnc	cfg4istat_have_sense_addr;
1944*4882a593Smuzhiyun	test	SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr;
1945*4882a593Smuzhiyun	/*
1946*4882a593Smuzhiyun	 * Host sets up address/count and enables transfer.
1947*4882a593Smuzhiyun	 */
1948*4882a593Smuzhiyun	SET_SEQINTCODE(CFG4ISTAT_INTR)
1949*4882a593Smuzhiyun	jmp	cfg4istat_setup_handler;
1950*4882a593Smuzhiyuncfg4istat_have_sense_addr:
1951*4882a593Smuzhiyun	bmov	HADDR, SCB_SENSE_BUSADDR, 4;
1952*4882a593Smuzhiyun	mvi	HCNT[1], (AHD_SENSE_BUFSIZE >> 8);
1953*4882a593Smuzhiyun	mvi	SG_CACHE_PRE, LAST_SEG;
1954*4882a593Smuzhiyun	mvi	DFCNTRL, PRELOADEN|SCSIEN|HDMAEN;
1955*4882a593Smuzhiyuncfg4istat_setup_handler:
1956*4882a593Smuzhiyun	/*
1957*4882a593Smuzhiyun	 * Status pkt is transferring to host.
1958*4882a593Smuzhiyun	 * Wait in idle loop for transfer to complete.
1959*4882a593Smuzhiyun	 * If a command completed before an attempted
1960*4882a593Smuzhiyun	 * task management function completed, notify the host.
1961*4882a593Smuzhiyun	 */
1962*4882a593Smuzhiyun	test	SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func;
1963*4882a593Smuzhiyun	SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
1964*4882a593Smuzhiyuncfg4istat_no_taskmgmt_func:
1965*4882a593Smuzhiyun	call	pkt_handle_status;
1966*4882a593Smuzhiyun	or	SEQINTCTL, IRET ret;
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyuncfg4icmd_intr:
1969*4882a593Smuzhiyun	/*
1970*4882a593Smuzhiyun	 * In the case of DMAing a CDB from the host, the normal
1971*4882a593Smuzhiyun	 * CDB buffer is formatted with an 8 byte address followed
1972*4882a593Smuzhiyun	 * by a 1 byte count.
1973*4882a593Smuzhiyun	 */
1974*4882a593Smuzhiyun	bmov	HADDR[0], SCB_HOST_CDB_PTR, 9;
1975*4882a593Smuzhiyun	mvi	SG_CACHE_PRE, LAST_SEG;
1976*4882a593Smuzhiyun	mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
1977*4882a593Smuzhiyun	call	pkt_handle_cdb;
1978*4882a593Smuzhiyun	or	SEQINTCTL, IRET ret;
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun/*
1981*4882a593Smuzhiyun * See if the target has gone on in this context creating an
1982*4882a593Smuzhiyun * overrun condition.  For the write case, the hardware cannot
1983*4882a593Smuzhiyun * ack bytes until data are provided.  So, if the target begins
1984*4882a593Smuzhiyun * another  packet without changing contexts, implying we are
1985*4882a593Smuzhiyun * not sitting on a packet boundary, we are in an overrun
1986*4882a593Smuzhiyun * situation.  For the read case, the hardware will continue to
1987*4882a593Smuzhiyun * ack bytes into the FIFO, and may even ack the last overrun packet
1988*4882a593Smuzhiyun * into the FIFO.   If the FIFO should become non-empty, we are in
1989*4882a593Smuzhiyun * a read overrun case.
1990*4882a593Smuzhiyun */
1991*4882a593Smuzhiyun#define check_overrun							\
1992*4882a593Smuzhiyun	/* Not on a packet boundary. */					\
1993*4882a593Smuzhiyun	test 	MDFFSTAT, DLZERO jz pkt_handle_overrun;			\
1994*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jz pkt_handle_overrun
1995*4882a593Smuzhiyun
1996*4882a593Smuzhiyunpkt_handle_xfer:
1997*4882a593Smuzhiyun	test	SG_STATE, LOADING_NEEDED jz pkt_last_seg;
1998*4882a593Smuzhiyun	call	setjmp;
1999*4882a593Smuzhiyun	test	SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
2000*4882a593Smuzhiyun	test	SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
2001*4882a593Smuzhiyun	test	SCSISIGO, ATNO jnz . + 2;
2002*4882a593Smuzhiyun	test	SSTAT2, NONPACKREQ jz pkt_service_fifo;
2003*4882a593Smuzhiyun	/*
2004*4882a593Smuzhiyun	 * Defer handling of this NONPACKREQ until we
2005*4882a593Smuzhiyun	 * can be sure it pertains to this FIFO.  SAVEPTRS
2006*4882a593Smuzhiyun	 * will not be asserted if the NONPACKREQ is for us,
2007*4882a593Smuzhiyun	 * so we must simulate it if shadow is valid.  If
2008*4882a593Smuzhiyun	 * shadow is not valid, keep running this FIFO until we
2009*4882a593Smuzhiyun	 * have satisfied the transfer by loading segments and
2010*4882a593Smuzhiyun	 * waiting for either shadow valid or last_seg_done.
2011*4882a593Smuzhiyun	 */
2012*4882a593Smuzhiyun	test	MDFFSTAT, SHVALID jnz pkt_saveptrs;
2013*4882a593Smuzhiyunpkt_service_fifo:
2014*4882a593Smuzhiyun	test	SG_STATE, LOADING_NEEDED jnz service_fifo;
2015*4882a593Smuzhiyunpkt_last_seg:
2016*4882a593Smuzhiyun	call	setjmp;
2017*4882a593Smuzhiyun	test	SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
2018*4882a593Smuzhiyun	test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done;
2019*4882a593Smuzhiyun	test	SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
2020*4882a593Smuzhiyun	test	SCSISIGO, ATNO jnz . + 2;
2021*4882a593Smuzhiyun	test	SSTAT2, NONPACKREQ jz return;
2022*4882a593Smuzhiyun	test	MDFFSTAT, SHVALID jz return;
2023*4882a593Smuzhiyun	/* FALLTHROUGH */
2024*4882a593Smuzhiyun
2025*4882a593Smuzhiyun/*
2026*4882a593Smuzhiyun * Either a SAVEPTRS interrupt condition is pending for this FIFO
2027*4882a593Smuzhiyun * or we have a pending NONPACKREQ for this FIFO.  We differentiate
2028*4882a593Smuzhiyun * between the two by capturing the state of the SAVEPTRS interrupt
2029*4882a593Smuzhiyun * prior to clearing this status and executing the common code for
2030*4882a593Smuzhiyun * these two cases.
2031*4882a593Smuzhiyun */
2032*4882a593Smuzhiyunpkt_saveptrs:
2033*4882a593SmuzhiyunBEGIN_CRITICAL;
2034*4882a593Smuzhiyun	if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
2035*4882a593Smuzhiyun		or	DFCNTRL, FIFOFLUSH;
2036*4882a593Smuzhiyun	}
2037*4882a593Smuzhiyun	mov	REG0, SEQINTSRC;
2038*4882a593Smuzhiyun	call	calc_residual;
2039*4882a593Smuzhiyun	call	save_pointers;
2040*4882a593Smuzhiyun	mvi	CLRSEQINTSRC, CLRSAVEPTRS;
2041*4882a593Smuzhiyun	call	disable_ccsgen;
2042*4882a593Smuzhiyun	or	SEQIMODE, ENSAVEPTRS;
2043*4882a593Smuzhiyun	test	DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status;
2044*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status;
2045*4882a593Smuzhiyun	/*
2046*4882a593Smuzhiyun	 * Keep a handler around for this FIFO until it drains
2047*4882a593Smuzhiyun	 * to the host to guarantee that we don't complete the
2048*4882a593Smuzhiyun	 * command to the host before the data arrives.
2049*4882a593Smuzhiyun	 */
2050*4882a593Smuzhiyunpkt_saveptrs_wait_fifoemp:
2051*4882a593Smuzhiyun	call	setjmp;
2052*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jz return;
2053*4882a593Smuzhiyunpkt_saveptrs_check_status:
2054*4882a593Smuzhiyun	or	LONGJMP_ADDR[1], INVALID_ADDR;
2055*4882a593Smuzhiyun	test	REG0, SAVEPTRS jz unexpected_nonpkt_phase;
2056*4882a593Smuzhiyun	dec	SCB_FIFO_USE_COUNT;
2057*4882a593Smuzhiyun	test	SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
2058*4882a593Smuzhiyun	mvi	DFFSXFRCTL, CLRCHN ret;
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyun/*
2061*4882a593Smuzhiyun * LAST_SEG_DONE status has been seen in the current FIFO.
2062*4882a593Smuzhiyun * This indicates that all of the allowed data for this
2063*4882a593Smuzhiyun * command has transferred across the SCSI and host buses.
2064*4882a593Smuzhiyun * Check for overrun and see if we can complete this command.
2065*4882a593Smuzhiyun */
2066*4882a593Smuzhiyunpkt_last_seg_done:
2067*4882a593Smuzhiyun	/*
2068*4882a593Smuzhiyun	 * Mark transfer as completed.
2069*4882a593Smuzhiyun	 */
2070*4882a593Smuzhiyun	or	SCB_SGPTR, SG_LIST_NULL;
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun	/*
2073*4882a593Smuzhiyun	 * Wait for the current context to finish to verify that
2074*4882a593Smuzhiyun	 * no overrun condition has occurred.
2075*4882a593Smuzhiyun	 */
2076*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
2077*4882a593Smuzhiyun	call	setjmp;
2078*4882a593Smuzhiyunpkt_wait_ctxt_done_loop:
2079*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
2080*4882a593Smuzhiyun	/*
2081*4882a593Smuzhiyun	 * A sufficiently large overrun or a NONPACKREQ may
2082*4882a593Smuzhiyun	 * prevent CTXTDONE from ever asserting, so we must
2083*4882a593Smuzhiyun	 * poll for these statuses too.
2084*4882a593Smuzhiyun	 */
2085*4882a593Smuzhiyun	check_overrun;
2086*4882a593Smuzhiyun	test	SSTAT2, NONPACKREQ jz return;
2087*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
2088*4882a593Smuzhiyun	/* FALLTHROUGH */
2089*4882a593Smuzhiyun
2090*4882a593Smuzhiyunpkt_ctxt_done:
2091*4882a593Smuzhiyun	check_overrun;
2092*4882a593Smuzhiyun	or	LONGJMP_ADDR[1], INVALID_ADDR;
2093*4882a593Smuzhiyun	/*
2094*4882a593Smuzhiyun	 * If status has been received, it is safe to skip
2095*4882a593Smuzhiyun	 * the check to see if another FIFO is active because
2096*4882a593Smuzhiyun	 * LAST_SEG_DONE has been observed.  However, we check
2097*4882a593Smuzhiyun	 * the FIFO anyway since it costs us only one extra
2098*4882a593Smuzhiyun	 * instruction to leverage common code to perform the
2099*4882a593Smuzhiyun	 * SCB completion.
2100*4882a593Smuzhiyun	 */
2101*4882a593Smuzhiyun	dec	SCB_FIFO_USE_COUNT;
2102*4882a593Smuzhiyun	test	SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
2103*4882a593Smuzhiyun	mvi	DFFSXFRCTL, CLRCHN ret;
2104*4882a593SmuzhiyunEND_CRITICAL;
2105*4882a593Smuzhiyun
2106*4882a593Smuzhiyun/*
2107*4882a593Smuzhiyun * Must wait until CDB xfer is over before issuing the
2108*4882a593Smuzhiyun * clear channel.
2109*4882a593Smuzhiyun */
2110*4882a593Smuzhiyunpkt_handle_cdb:
2111*4882a593Smuzhiyun	call	setjmp;
2112*4882a593Smuzhiyun	test	SG_CACHE_SHADOW, LAST_SEG_DONE jz return;
2113*4882a593Smuzhiyun	or	LONGJMP_ADDR[1], INVALID_ADDR;
2114*4882a593Smuzhiyun	mvi	DFFSXFRCTL, CLRCHN ret;
2115*4882a593Smuzhiyun
2116*4882a593Smuzhiyun/*
2117*4882a593Smuzhiyun * Watch over the status transfer.  Our host sense buffer is
2118*4882a593Smuzhiyun * large enough to take the maximum allowed status packet.
2119*4882a593Smuzhiyun * None-the-less, we must still catch and report overruns to
2120*4882a593Smuzhiyun * the host.  Additionally, properly catch unexpected non-packet
2121*4882a593Smuzhiyun * phases that are typically caused by CRC errors in status packet
2122*4882a593Smuzhiyun * transmission.
2123*4882a593Smuzhiyun */
2124*4882a593Smuzhiyunpkt_handle_status:
2125*4882a593Smuzhiyun	call	setjmp;
2126*4882a593Smuzhiyun	test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
2127*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq;
2128*4882a593Smuzhiyun	test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
2129*4882a593Smuzhiyunpkt_status_IU_done:
2130*4882a593Smuzhiyun	if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
2131*4882a593Smuzhiyun		or	DFCNTRL, FIFOFLUSH;
2132*4882a593Smuzhiyun	}
2133*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jz return;
2134*4882a593SmuzhiyunBEGIN_CRITICAL;
2135*4882a593Smuzhiyun	or	LONGJMP_ADDR[1], INVALID_ADDR;
2136*4882a593Smuzhiyun	mvi	SCB_SCSI_STATUS, STATUS_PKT_SENSE;
2137*4882a593Smuzhiyun	or	SCB_CONTROL, STATUS_RCVD;
2138*4882a593Smuzhiyun	jmp	pkt_complete_scb_if_fifos_idle;
2139*4882a593SmuzhiyunEND_CRITICAL;
2140*4882a593Smuzhiyunpkt_status_check_overrun:
2141*4882a593Smuzhiyun	/*
2142*4882a593Smuzhiyun	 * Status PKT overruns are uncerimoniously recovered with a
2143*4882a593Smuzhiyun	 * bus reset.  If we've overrun, let the host know so that
2144*4882a593Smuzhiyun	 * recovery can be performed.
2145*4882a593Smuzhiyun	 *
2146*4882a593Smuzhiyun	 * LAST_SEG_DONE has been observed.  If either CTXTDONE or
2147*4882a593Smuzhiyun	 * a NONPACKREQ phase change have occurred and the FIFO is
2148*4882a593Smuzhiyun	 * empty, there is no overrun.
2149*4882a593Smuzhiyun	 */
2150*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jz pkt_status_report_overrun;
2151*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jz . + 2;
2152*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jnz pkt_status_IU_done;
2153*4882a593Smuzhiyun	test	SCSIPHASE, ~DATA_PHASE_MASK jz return;
2154*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq;
2155*4882a593Smuzhiyunpkt_status_report_overrun:
2156*4882a593Smuzhiyun	SET_SEQINTCODE(STATUS_OVERRUN)
2157*4882a593Smuzhiyun	/* SEQUENCER RESTARTED */
2158*4882a593Smuzhiyunpkt_status_check_nonpackreq:
2159*4882a593Smuzhiyun	/*
2160*4882a593Smuzhiyun	 * CTXTDONE may be held off if a NONPACKREQ is associated with
2161*4882a593Smuzhiyun	 * the current context.  If a NONPACKREQ is observed, decide
2162*4882a593Smuzhiyun	 * if it is for the current context.  If it is for the current
2163*4882a593Smuzhiyun	 * context, we must defer NONPACKREQ processing until all data
2164*4882a593Smuzhiyun	 * has transferred to the host.
2165*4882a593Smuzhiyun	 */
2166*4882a593Smuzhiyun	test	SCSIPHASE, ~DATA_PHASE_MASK jz return;
2167*4882a593Smuzhiyun	test	SCSISIGO, ATNO jnz . + 2;
2168*4882a593Smuzhiyun	test	SSTAT2, NONPACKREQ jz return;
2169*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jnz pkt_status_IU_done;
2170*4882a593Smuzhiyun	test	DFSTATUS, FIFOEMP jz return;
2171*4882a593Smuzhiyun	/*
2172*4882a593Smuzhiyun	 * The unexpected nonpkt phase handler assumes that any
2173*4882a593Smuzhiyun	 * data channel use will have a FIFO reference count.  It
2174*4882a593Smuzhiyun	 * turns out that the status handler doesn't need a references
2175*4882a593Smuzhiyun	 * count since the status received flag, and thus completion
2176*4882a593Smuzhiyun	 * processing, cannot be set until the handler is finished.
2177*4882a593Smuzhiyun	 * We increment the count here to make the nonpkt handler
2178*4882a593Smuzhiyun	 * happy.
2179*4882a593Smuzhiyun	 */
2180*4882a593Smuzhiyun	inc	SCB_FIFO_USE_COUNT;
2181*4882a593Smuzhiyun	/* FALLTHROUGH */
2182*4882a593Smuzhiyun
2183*4882a593Smuzhiyun/*
2184*4882a593Smuzhiyun * Nonpackreq is a polled status.  It can come true in three situations:
2185*4882a593Smuzhiyun * we have received an L_Q, we have sent one or more L_Qs, or there is no
2186*4882a593Smuzhiyun * L_Q context associated with this REQ (REQ occurs immediately after a
2187*4882a593Smuzhiyun * (re)selection).  Routines that know that the context responsible for this
2188*4882a593Smuzhiyun * nonpackreq call directly into unexpected_nonpkt_phase.  In the case of the
2189*4882a593Smuzhiyun * top level idle loop, we exhaust all active contexts prior to determining that
2190*4882a593Smuzhiyun * we simply do not have the full I_T_L_Q for this phase.
2191*4882a593Smuzhiyun */
2192*4882a593Smuzhiyununexpected_nonpkt_phase_find_ctxt:
2193*4882a593Smuzhiyun	/*
2194*4882a593Smuzhiyun	 * This nonpackreq is most likely associated with one of the tags
2195*4882a593Smuzhiyun	 * in a FIFO or an outgoing LQ.  Only treat it as an I_T only
2196*4882a593Smuzhiyun	 * nonpackreq if we've cleared out the FIFOs and handled any
2197*4882a593Smuzhiyun	 * pending SELDO.
2198*4882a593Smuzhiyun	 */
2199*4882a593SmuzhiyunSET_SRC_MODE	M_SCSI;
2200*4882a593SmuzhiyunSET_DST_MODE	M_SCSI;
2201*4882a593Smuzhiyun	and	A, FIFO1FREE|FIFO0FREE, DFFSTAT;
2202*4882a593Smuzhiyun	cmp	A, FIFO1FREE|FIFO0FREE jne return;
2203*4882a593Smuzhiyun	test	SSTAT0, SELDO jnz return;
2204*4882a593Smuzhiyun	mvi	SCBPTR[1], SCB_LIST_NULL;
2205*4882a593Smuzhiyununexpected_nonpkt_phase:
2206*4882a593Smuzhiyun	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
2207*4882a593Smuzhiyun		jnz unexpected_nonpkt_mode_cleared;
2208*4882a593SmuzhiyunSET_SRC_MODE	M_DFF0;
2209*4882a593SmuzhiyunSET_DST_MODE	M_DFF0;
2210*4882a593Smuzhiyun	or	LONGJMP_ADDR[1], INVALID_ADDR;
2211*4882a593Smuzhiyun	dec	SCB_FIFO_USE_COUNT;
2212*4882a593Smuzhiyun	mvi	DFFSXFRCTL, CLRCHN;
2213*4882a593Smuzhiyununexpected_nonpkt_mode_cleared:
2214*4882a593Smuzhiyun	mvi	CLRSINT2, CLRNONPACKREQ;
2215*4882a593Smuzhiyun	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
2216*4882a593Smuzhiyun		/*
2217*4882a593Smuzhiyun		 * Test to ensure that the bus has not
2218*4882a593Smuzhiyun		 * already gone free prior to clearing
2219*4882a593Smuzhiyun		 * any stale busfree status.  This avoids
2220*4882a593Smuzhiyun		 * a window whereby a busfree just after
2221*4882a593Smuzhiyun		 * a selection could be missed.
2222*4882a593Smuzhiyun		 */
2223*4882a593Smuzhiyun		test	SCSISIGI, BSYI jz . + 2;
2224*4882a593Smuzhiyun		mvi	CLRSINT1,CLRBUSFREE;
2225*4882a593Smuzhiyun		or	SIMODE1, ENBUSFREE;
2226*4882a593Smuzhiyun	}
2227*4882a593Smuzhiyun	test	SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase;
2228*4882a593Smuzhiyun	SET_SEQINTCODE(ENTERING_NONPACK)
2229*4882a593Smuzhiyun	jmp	ITloop;
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyunillegal_phase:
2232*4882a593Smuzhiyun	SET_SEQINTCODE(ILLEGAL_PHASE)
2233*4882a593Smuzhiyun	jmp	ITloop;
2234*4882a593Smuzhiyun
2235*4882a593Smuzhiyun/*
2236*4882a593Smuzhiyun * We have entered an overrun situation.  If we have working
2237*4882a593Smuzhiyun * BITBUCKET, flip that on and let the hardware eat any overrun
2238*4882a593Smuzhiyun * data.  Otherwise use an overrun buffer in the host to simulate
2239*4882a593Smuzhiyun * BITBUCKET.
2240*4882a593Smuzhiyun */
2241*4882a593Smuzhiyunpkt_handle_overrun_inc_use_count:
2242*4882a593Smuzhiyun	inc	SCB_FIFO_USE_COUNT;
2243*4882a593Smuzhiyunpkt_handle_overrun:
2244*4882a593Smuzhiyun	SET_SEQINTCODE(CFG4OVERRUN)
2245*4882a593Smuzhiyun	call	freeze_queue;
2246*4882a593Smuzhiyun	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) {
2247*4882a593Smuzhiyun		or	DFFSXFRCTL, DFFBITBUCKET;
2248*4882a593SmuzhiyunSET_SRC_MODE	M_DFF1;
2249*4882a593SmuzhiyunSET_DST_MODE	M_DFF1;
2250*4882a593Smuzhiyun	} else {
2251*4882a593Smuzhiyun		call	load_overrun_buf;
2252*4882a593Smuzhiyun		mvi	DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN);
2253*4882a593Smuzhiyun	}
2254*4882a593Smuzhiyun	call	setjmp;
2255*4882a593Smuzhiyun	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
2256*4882a593Smuzhiyun		test	DFSTATUS, PRELOAD_AVAIL jz overrun_load_done;
2257*4882a593Smuzhiyun		call	load_overrun_buf;
2258*4882a593Smuzhiyun		or	DFCNTRL, PRELOADEN;
2259*4882a593Smuzhiyunoverrun_load_done:
2260*4882a593Smuzhiyun		test	SEQINTSRC, CTXTDONE jnz pkt_overrun_end;
2261*4882a593Smuzhiyun	} else {
2262*4882a593Smuzhiyun		test	DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end;
2263*4882a593Smuzhiyun	}
2264*4882a593Smuzhiyun	test	SSTAT2, NONPACKREQ jz return;
2265*4882a593Smuzhiyunpkt_overrun_end:
2266*4882a593Smuzhiyun	or	SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID;
2267*4882a593Smuzhiyun	test	SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
2268*4882a593Smuzhiyun	dec	SCB_FIFO_USE_COUNT;
2269*4882a593Smuzhiyun	or	LONGJMP_ADDR[1], INVALID_ADDR;
2270*4882a593Smuzhiyun	test	SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
2271*4882a593Smuzhiyun	mvi	DFFSXFRCTL, CLRCHN ret;
2272*4882a593Smuzhiyun
2273*4882a593Smuzhiyunif ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
2274*4882a593Smuzhiyunload_overrun_buf:
2275*4882a593Smuzhiyun	/*
2276*4882a593Smuzhiyun	 * Load a dummy segment if preload space is available.
2277*4882a593Smuzhiyun	 */
2278*4882a593Smuzhiyun	mov 	HADDR[0], SHARED_DATA_ADDR;
2279*4882a593Smuzhiyun	add	HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1];
2280*4882a593Smuzhiyun	mov	ACCUM_SAVE, A;
2281*4882a593Smuzhiyun	clr	A;
2282*4882a593Smuzhiyun	adc	HADDR[2], A, SHARED_DATA_ADDR[2];
2283*4882a593Smuzhiyun	adc	HADDR[3], A, SHARED_DATA_ADDR[3];
2284*4882a593Smuzhiyun	mov	A, ACCUM_SAVE;
2285*4882a593Smuzhiyun	bmov	HADDR[4], ALLZEROS, 4;
2286*4882a593Smuzhiyun	/* PKT_OVERRUN_BUFSIZE is a multiple of 256 */
2287*4882a593Smuzhiyun	clr	HCNT[0];
2288*4882a593Smuzhiyun	mvi	HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF);
2289*4882a593Smuzhiyun	clr	HCNT[2] ret;
2290*4882a593Smuzhiyun}
2291