/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 */

#include <linux/gfp.h>

#ifndef SYM_HIPD_H
#define SYM_HIPD_H

/*
 * Generic driver options.
 *
 * They may be defined in platform specific headers, if they
 * are useful.
 *
 *    SYM_OPT_HANDLE_DEVICE_QUEUEING
 *        When this option is set, the driver will use a queue per
 *        device and handle QUEUE FULL status requeuing internally.
 *
 *    SYM_OPT_LIMIT_COMMAND_REORDERING
 *        When this option is set, the driver tries to limit tagged
 *        command reordering to some reasonable value.
 *        (set for Linux)
 */
#if 0
#define SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING
#endif

/*
 * Active debugging tags and verbosity.
 * Both DEBUG_FLAGS and sym_verbose can be redefined
 * by the platform specific code to something else.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#ifndef DEBUG_FLAGS
#define DEBUG_FLAGS	(0x0000)
#endif

#ifndef sym_verbose
#define sym_verbose	(np->verbose)
#endif

/*
 * These ones should have been already defined.
 */
#ifndef assert
#define assert(expression) { \
	if (!(expression)) { \
		(void)panic( \
			"assertion \"%s\" failed: file \"%s\", line %d\n", \
			#expression, \
			__FILE__, __LINE__); \
	} \
}
#endif

/*
 * Number of tasks per device we want to handle.
 */
#if SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)

/*
 * Do not use more tasks than we can handle.
 */
#ifndef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif
#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 * This one means 'NO TAG for this job'
 */
#define NO_TAG	(256)

/*
 * Number of SCSI targets.
 */
#if SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 * Number of logical units per target.
 */
#if SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 * Asynchronous pre-scaler (ns). Shall be 40 for
 * the SCSI timings to be compliant.
 */
#define	SYM_CONF_MIN_ASYNC	(40)


/*
 * MEMORY ALLOCATOR.
 */

#define SYM_MEM_WARN	1	/* Warn on failed operations */

#define SYM_MEM_PAGE_ORDER	0	/* 1 PAGE maximum */
#define SYM_MEM_CLUSTER_SHIFT	(PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
#define SYM_MEM_FREE_UNUSED	/* Free unused pages immediately */
/*
 * Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
 * Actual allocations happen as SYM_MEM_CLUSTER_SIZE sized.
 * (1 PAGE at a time is just fine).
 */
#define SYM_MEM_SHIFT	4
#define SYM_MEM_CLUSTER_SIZE	(1UL << SYM_MEM_CLUSTER_SHIFT)
#define SYM_MEM_CLUSTER_MASK	(SYM_MEM_CLUSTER_SIZE-1)

/*
 * Number of entries in the START and DONE queues.
 *
 * We limit them to 1 PAGE so that allocation of these
 * queues succeeds. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_QUEUE	(SYM_CONF_MAX_START+2)
#else
#define	SYM_CONF_MAX_QUEUE	(7*SYM_CONF_MAX_TASK+2)
#define	SYM_CONF_MAX_START	(SYM_CONF_MAX_QUEUE-2)
#endif

#if	SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
#undef	SYM_CONF_MAX_QUEUE
#define	SYM_CONF_MAX_QUEUE	(SYM_MEM_CLUSTER_SIZE/8)
#undef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_START	(SYM_CONF_MAX_QUEUE-2)
#endif

/*
 * For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 * Common definitions for both bus space based and legacy IO methods.
 */

#define INB_OFF(np, o)		ioread8(np->s.ioaddr + (o))
#define INW_OFF(np, o)		ioread16(np->s.ioaddr + (o))
#define INL_OFF(np, o)		ioread32(np->s.ioaddr + (o))

#define OUTB_OFF(np, o, val)	iowrite8((val), np->s.ioaddr + (o))
#define OUTW_OFF(np, o, val)	iowrite16((val), np->s.ioaddr + (o))
#define OUTL_OFF(np, o, val)	iowrite32((val), np->s.ioaddr + (o))

#define INB(np, r)	INB_OFF(np, offsetof(struct sym_reg, r))
#define INW(np, r)	INW_OFF(np, offsetof(struct sym_reg, r))
#define INL(np, r)	INL_OFF(np, offsetof(struct sym_reg, r))

#define OUTB(np, r, v)	OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
#define OUTW(np, r, v)	OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
#define OUTL(np, r, v)	OUTL_OFF(np, offsetof(struct sym_reg, r), (v))

#define OUTONB(np, r, m)	OUTB(np, r, INB(np, r) | (m))
#define OUTOFFB(np, r, m)	OUTB(np, r, INB(np, r) & ~(m))
#define OUTONW(np, r, m)	OUTW(np, r, INW(np, r) | (m))
#define OUTOFFW(np, r, m)	OUTW(np, r, INW(np, r) & ~(m))
#define OUTONL(np, r, m)	OUTL(np, r, INL(np, r) | (m))
#define OUTOFFL(np, r, m)	OUTL(np, r, INL(np, r) & ~(m))

/*
 * We normally want the chip to have a consistent view
 * of driver internal data structures when we restart it.
 * Thus these macros.
 */
#define	OUTL_DSP(np, v)				\
	do {					\
		MEMORY_WRITE_BARRIER();		\
		OUTL(np, nc_dsp, (v));		\
	} while (0)

#define	OUTONB_STD()				\
	do {					\
		MEMORY_WRITE_BARRIER();		\
		OUTONB(np, nc_dcntl, (STD|NOCOM));	\
	} while (0)
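
/*
 * Illustrative sketch only (not part of the original header): the
 * accessors above are plain read/modify/write helpers over the chip
 * register window.  The hypothetical helper below is kept under
 * "#if 0" so it is never compiled; it toggles a bit in one of the
 * scratch registers (nc_scr3, used as HF_PRT further down in this
 * file) just to show how INB/OUTB/OUTONB/OUTOFFB combine.
 */
#if 0
static inline void sym_example_pulse_scratch_bit(struct sym_hcb *np)
{
	u_char old = INB(np, nc_scr3);	/* current value of the scratch byte */

	OUTONB(np, nc_scr3, 0x01);	/* set bit 0: INB, OR with mask, OUTB */
	OUTOFFB(np, nc_scr3, 0x01);	/* clear it again: INB, AND with ~mask, OUTB */
	OUTB(np, nc_scr3, old);		/* restore the original value */
}
#endif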

/*
 * Command control block states.
 */
#define HS_IDLE		(0)
#define HS_BUSY		(1)
#define HS_NEGOTIATE	(2)	/* sync/wide data transfer */
#define HS_DISCONNECT	(3)	/* Disconnected by target */
#define HS_WAIT		(4)	/* waiting for resource */

#define HS_DONEMASK	(0x80)
#define HS_COMPLETE	(4|HS_DONEMASK)
#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout */
#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect */
#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error */

/*
 * Software Interrupt Codes
 */
#define	SIR_BAD_SCSI_STATUS	(1)
#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
#define	SIR_MSG_RECEIVED	(3)
#define	SIR_MSG_WEIRD		(4)
#define	SIR_NEGO_FAILED		(5)
#define	SIR_NEGO_PROTO		(6)
#define	SIR_SCRIPT_STOPPED	(7)
#define	SIR_REJECT_TO_SEND	(8)
#define	SIR_SWIDE_OVERRUN	(9)
#define	SIR_SODL_UNDERRUN	(10)
#define	SIR_RESEL_NO_MSG_IN	(11)
#define	SIR_RESEL_NO_IDENTIFY	(12)
#define	SIR_RESEL_BAD_LUN	(13)
#define	SIR_TARGET_SELECTED	(14)
#define	SIR_RESEL_BAD_I_T_L	(15)
#define	SIR_RESEL_BAD_I_T_L_Q	(16)
#define	SIR_ABORT_SENT		(17)
#define	SIR_RESEL_ABORTED	(18)
#define	SIR_MSG_OUT_DONE	(19)
#define	SIR_COMPLETE_ERROR	(20)
#define	SIR_DATA_OVERRUN	(21)
#define	SIR_BAD_PHASE		(22)
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	SIR_DMAP_DIRTY		(23)
#define	SIR_MAX			(23)
#else
#define	SIR_MAX			(22)
#endif

/*
 * Extended error bit codes.
 * xerr_status field of struct sym_ccb.
 */
#define	XE_EXTRA_DATA	(1)	/* unexpected data phase */
#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5) */
#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */

/*
 * Negotiation status.
 * nego_status field of struct sym_ccb.
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 * A CCB hashed table is used to retrieve CCB address
 * from DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#if 1
#define CCB_HASH_CODE(dsa)	\
	(((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
#else
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)
#endif
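
/*
 * Illustrative sketch only (not part of the original header): how the
 * per-bucket chains hashed by CCB_HASH_CODE() are meant to be walked
 * to get back the CCB that owns a given DSA value, along the lines of
 * what the .c code does on reselection.  The helper name is
 * hypothetical and the block is kept under "#if 0" so it is never
 * compiled (it relies on types declared further down in this file).
 */
#if 0
static inline struct sym_ccb *sym_example_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	struct sym_ccb *cp = np->ccbh[CCB_HASH_CODE(dsa)];

	while (cp && cp->ccb_ba != dsa)	/* follow the bucket's hash chain */
		cp = cp->link_ccbh;
	return cp;			/* NULL if the DSA matches no CCB */
}
#endif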

#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 * We may want to use segment registers for 64 bit DMA.
 * 16 segments registers -> up to 64 GB addressable.
 */
#define SYM_DMAP_SHIFT	(4)
#define SYM_DMAP_SIZE	(1u<<SYM_DMAP_SHIFT)
#define SYM_DMAP_MASK	(SYM_DMAP_SIZE-1)
#endif

/*
 * Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 * Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)

/*
 * Misc.
 */
#define SYM_SNOOP_TIMEOUT	(10000000)
#define BUS_8_BIT		0
#define BUS_16_BIT		1

/*
 * Gather negotiable parameters value
 */
struct sym_trans {
	u8 period;
	u8 offset;
	unsigned int width:1;
	unsigned int iu:1;
	unsigned int dt:1;
	unsigned int qas:1;
	unsigned int check_nego:1;
	unsigned int renego:2;
};

/*
 * Global TCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the TCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_tcbh {
	/*
	 * Scripts bus addresses of LUN table accessed from scripts.
	 * LUN #0 is a special case, since multi-lun devices are rare,
	 * and we want to speed up the general case and not waste
	 * resources.
	 */
	u32	luntbl_sa;	/* bus address of this table */
	u32	lun0_sa;	/* bus address of LCB #0 */
	/*
	 * Actual SYNC/WIDE IO registers value for this target.
	 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
	 * so have alignment constraints.
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register */
/*1*/	u_char	sval;		/* -> SXFER io register */
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register */
};

/*
 * Target Control Block
 */
struct sym_tcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_tcbh head;

	/*
	 * LUN table used by the SCRIPTS processor.
	 * An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table */
	int	nlcb;		/* Number of valid LCBs (including LUN #0) */

	/*
	 * LUN table used by the C code.
	 */
	struct sym_lcb *lun0p;		/* LCB of LUN #0 (usual case) */
#if SYM_CONF_MAX_LUN > 1
	struct sym_lcb **lunmp;		/* Other LCBs [1..MAX_LUN] */
#endif

#ifdef	SYM_HAVE_STCB
	/*
	 * O/S specific data structure.
	 */
	struct sym_stcb s;
#endif

	/* Transfer goal */
	struct sym_trans tgoal;

	/* Last printed transfer speed */
	struct sym_trans tprint;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	struct sym_ccb *nego_cp;	/* CCB used for the nego */

	/*
	 * Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 * Other user settable limits and options.
	 * These limits are read from the NVRAM if present.
	 */
	unsigned char	usrflags;
	unsigned char	usr_period;
	unsigned char	usr_width;
	unsigned short	usrtags;
	struct scsi_target *starget;
};

/*
 * Global LCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the LCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 * SCRIPTS address jumped by SCRIPTS on reselection.
	 * For not probed logical units, this address points to
	 * SCRIPTS that deal with bad LU handling (must be at
	 * offset zero of the LCB for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 * Task (bus address of a CCB) read from SCRIPTS that points
	 * to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 * Task table bus address (read from SCRIPTS).
	 */
	u32	itlq_tbl_sa;
};

/*
 * Logical Unit Control Block
 */
struct sym_lcb {
	/*
	 * LCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_lcbh head;

	/*
	 * Task table read from SCRIPTS that contains pointers to
	 * ITLQ nexuses. The bus address read from SCRIPTS is
	 * inside the header.
	 */
	u32	*itlq_tbl;	/* Kernel virtual address */

	/*
	 * Busy CCBs management.
	 */
	u_short	busy_itlq;	/* Number of busy tagged CCBs */
	u_short	busy_itl;	/* Number of busy untagged CCBs */

	/*
	 * Circular tag allocation buffer.
	 */
	u_short	ia_tag;		/* Tag allocation index */
	u_short	if_tag;		/* Tag release index */
	u_char	*cb_tags;	/* Circular tags buffer */

	/*
	 * O/S specific data structure.
	 */
#ifdef	SYM_HAVE_SLCB
	struct sym_slcb s;
#endif

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Optionally, the driver can handle device queueing and
	 * internally requeue commands that need to be redone.
	 */
	SYM_QUEHEAD waiting_ccbq;
	SYM_QUEHEAD started_ccbq;
	int	num_sgood;
	u_short	started_tags;
	u_short	started_no_tag;
	u_short	started_max;
	u_short	started_limit;
#endif

#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
	/*
	 * Optionally the driver can try to prevent SCSI
	 * IOs from being reordered too much.
	 */
	u_char		tags_si;	/* Current index to tags sum */
	u_short		tags_sum[2];	/* Tags sum counters */
	u_short		tags_since;	/* # of tags since last switch */
#endif

	/*
	 * Set when we want to clear all tasks.
	 */
	u_char to_clear;

	/*
	 * Capabilities.
	 */
	u_char	user_flags;
	u_char	curr_flags;
};

/*
 * Action from SCRIPTS on a task.
 * It is part of the CCB, but is also used separately to plug in
 * the error handling action to be performed from SCRIPTS.
 */
struct sym_actscr {
	u32	start;		/* Jumped by SCRIPTS after selection */
	u32	restart;	/* Jumped by SCRIPTS on reselection */
};

/*
 * Phase mismatch context.
 *
 * It is part of the CCB and is used as parameters for the
 * DATA pointer. We need two contexts to handle correctly the
 * SAVED DATA POINTER.
 */
struct sym_pmc {
	struct sym_tblmove sg;	/* Updated interrupted SG block */
	u32	ret;		/* SCRIPT return address */
};

/*
 * LUN control block lookup.
 * We use a direct pointer for LUN #0, and a table of
 * pointers which is only allocated for devices that support
 * LUN(s) > 0.
 */
#if SYM_CONF_MAX_LUN <= 1
#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
#else
#define sym_lp(tp, lun) \
	(!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL
#endif
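
/*
 * Illustrative sketch only (not part of the original header): typical
 * use of sym_lp() to reach the per-LUN state of a target before
 * touching it.  The helper name is hypothetical; kept under "#if 0"
 * so it is never compiled.
 */
#if 0
static inline int sym_example_lun_busy_count(struct sym_tcb *tp, int lun)
{
	struct sym_lcb *lp = sym_lp(tp, lun);	/* NULL if this LUN has no LCB yet */

	return lp ? (lp->busy_itlq + lp->busy_itl) : 0;
}
#endif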

/*
 * These status bytes are used by both the host and the script processor.
 *
 * The last four bytes (status[4]) are copied to the
 * scratchb register (declared as scr0..scr3) just after the
 * select/reselect, and copied back just after disconnecting.
 * Inside the script the XX_REG are used.
 */

/*
 * Last four bytes (script)
 */
#define  HX_REG	scr0
#define  HX_PRT	nc_scr0
#define  HS_REG	scr1
#define  HS_PRT	nc_scr1
#define  SS_REG	scr2
#define  SS_PRT	nc_scr2
#define  HF_REG	scr3
#define  HF_PRT	nc_scr3

/*
 * Last four bytes (host)
 */
#define  host_xflags	phys.head.status[0]
#define  host_status	phys.head.status[1]
#define  ssss_status	phys.head.status[2]
#define  host_flags	phys.head.status[3]

/*
 * Host flags
 */
#define HF_IN_PM0	1u
#define HF_IN_PM1	(1u<<1)
#define HF_ACT_PM	(1u<<2)
#define HF_DP_SAVED	(1u<<3)
#define HF_SENSE	(1u<<4)
#define HF_EXT_ERR	(1u<<5)
#define HF_DATA_IN	(1u<<6)
#ifdef SYM_CONF_IARB_SUPPORT
#define HF_HINT_IARB	(1u<<7)
#endif

/*
 * More host flags
 */
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	HX_DMAP_DIRTY	(1u<<7)
#endif

/*
 * Global CCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the ccb to a global
 * address after selection (or reselection) and copied back
 * before disconnect.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */

struct sym_ccbh {
	/*
	 * Start and restart SCRIPTS addresses (must be at 0).
	 */
/*0*/	struct sym_actscr go;

	/*
	 * SCRIPTS jump address that deal with data pointers.
	 * 'savep' points to the position in the script responsible
	 * for the actual transfer of data.
	 * It's written on reception of a SAVE_DATA_POINTER message.
	 */
	u32	savep;		/* Jump address to saved data pointer */
	u32	lastp;		/* SCRIPTS address at end of data */

	/*
	 * Status fields.
	 */
	u8	status[4];
};

/*
 * GET/SET the value of the data pointer used by SCRIPTS.
 *
 * We must distinguish between the LOAD/STORE-based SCRIPTS
 * that directly use the header in the CCB, and the NCR-GENERIC
 * SCRIPTS that use the copy of the header in the HCB.
 */
#if	SYM_CONF_GENERIC_SUPPORT
#define sym_set_script_dp(np, cp, dp)				\
	do {							\
		if (np->features & FE_LDSTR)			\
			cp->phys.head.lastp = cpu_to_scr(dp);	\
		else						\
			np->ccb_head.lastp = cpu_to_scr(dp);	\
	} while (0)
#define sym_get_script_dp(np, cp)				\
	scr_to_cpu((np->features & FE_LDSTR) ?			\
		cp->phys.head.lastp : np->ccb_head.lastp)
#else
#define sym_set_script_dp(np, cp, dp)				\
	do {							\
		cp->phys.head.lastp = cpu_to_scr(dp);		\
	} while (0)

#define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
#endif
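
/*
 * Illustrative sketch only (not part of the original header): the two
 * accessors above hide whether the data pointer lives in the CCB
 * header (LOAD/STORE SCRIPTS) or in the HCB copy (generic SCRIPTS).
 * The hypothetical helper below, kept under "#if 0" so it is never
 * compiled, just shows the intended set/get round trip.
 */
#if 0
static inline void sym_example_set_and_check_dp(struct sym_hcb *np,
						struct sym_ccb *cp, u32 dp)
{
	sym_set_script_dp(np, cp, dp);		/* stored in CCB or HCB copy as appropriate */
	assert(sym_get_script_dp(np, cp) == dp);	/* read back from the same place */
}
#endif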

/*
 * Data Structure Block
 *
 * During execution of a ccb by the script processor, the
 * DSA (data structure address) register points to this
 * substructure of the ccb.
 */
struct sym_dsb {
	/*
	 * CCB header.
	 * Also assumed at offset 0 of the sym_ccb structure.
	 */
/*0*/	struct sym_ccbh head;

	/*
	 * Phase mismatch contexts.
	 * We need two to handle correctly the SAVED DATA POINTER.
	 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
	 * for address calculation from SCRIPTS.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 * Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data[SYM_CONF_MAX_SG];
};

/*
 * Our Command Control Block
 */
struct sym_ccb {
	/*
	 * This is the data structure which is pointed by the DSA
	 * register when it is executed by the script processor.
	 * It must be the first entry.
	 */
	struct sym_dsb phys;

	/*
	 * Pointer to CAM ccb and related stuff.
	 */
	struct scsi_cmnd *cmd;	/* CAM scsiio ccb */
	u8	cdb_buf[16];	/* Copy of CDB */
#define	SYM_SNS_BBUF_LEN 32
	u8	sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
	int	data_len;	/* Total data length */
	int	segments;	/* Number of SG segments */

	u8	order;		/* Tag type (if tagged command) */
	unsigned char odd_byte_adjustment;	/* odd-sized req on wide bus */

	u_char	nego_status;	/* Negotiation status */
	u_char	xerr_status;	/* Extended error flags */
	u32	extra_bytes;	/* Extraneous bytes transferred */

	/*
	 * Message areas.
	 * We prepare a message to be sent after selection.
	 * We may use a second one if the command is rescheduled
	 * due to CHECK_CONDITION or COMMAND TERMINATED.
	 * Contents are IDENTIFY and SIMPLE_TAG.
	 * While negotiating sync or wide transfer,
	 * a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 * Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command */
	u_char	sv_scsi_status;	/* Saved SCSI status */
	u_char	sv_xerr_status;	/* Saved extended status */
	int	sv_resid;	/* Saved residual */

	/*
	 * Other fields.
	 */
	u32	ccb_ba;		/* BUS address of this CCB */
	u_short	tag;		/* Tag for this transfer */
				/* NO_TAG means no tag */
	u_char	target;
	u_char	lun;
	struct sym_ccb *link_ccbh;	/* Host adapter CCB hash chain */
	SYM_QUEHEAD link_ccbq;	/* Link to free/busy CCB queue */
	u32	startp;		/* Initial data pointer */
	u32	goalp;		/* Expected last data pointer */
	int	ext_sg;		/* Extreme data pointer, used */
	int	ext_ofs;	/* to calculate the residual. */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	SYM_QUEHEAD link2_ccbq;	/* Link for device queueing */
	u_char	started;	/* CCB queued to the squeue */
#endif
	u_char	to_abort;	/* Want this IO to be aborted */
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
	u_char	tags_si;	/* Lun tags sum index (0,1) */
#endif
};

#define CCB_BA(cp,lbl)	cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))

typedef struct device *m_pool_ident_t;

/*
 * Host Control Block
 */
struct sym_hcb {
	/*
	 * Global headers.
	 * Due to their limited addressing capabilities, the earlier
	 * chips (810, 815, 825) copy part of the data structures
	 * (CCB, TCB and LCB) into fixed areas.
	 */
#if	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 * Idle task and invalid task actions and
	 * their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	u32	idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 * Dummy lun table to protect us against target
	 * returning bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address */
	u32	badlun_sa;	/* SCRIPT handler BUS address */

	/*
	 * Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 * Bit 32-63 of the on-chip RAM bus address in LE format.
	 * The START_RAM64 script loads the MMRS and MMWS from this
	 * field.
	 */
	u32	scr_ram_seg;

	/*
	 * Initial value of some IO register bits.
	 * These values are assumed to have been set by BIOS, and may
	 * be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 * Actual initial value of IO register bits used by the
	 * driver. They are loaded at initialisation according to
	 * features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 * Target data.
	 */
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];

	/*
	 * Target control block bus address array used by the SCRIPT
	 * on reselection.
	 */
	u32	*targtbl;
	u32	targtbl_ba;

	/*
	 * DMA pool handle for this HBA.
	 */
	m_pool_ident_t	bus_dmat;

	/*
	 * O/S specific data structure
	 */
	struct sym_shcb s;

	/*
	 * Physical bus addresses of the chip.
	 */
	u32	mmio_ba;	/* MMIO 32 bit BUS address */
	u32	ram_ba;		/* RAM 32 bit BUS address */

	/*
	 * SCRIPTS virtual and physical bus addresses.
	 * 'script' is loaded in the on-chip RAM if present.
	 * 'scripth' stays in main memory for all chips except the
	 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
	 */
	u_char	*scripta0;	/* Copy of scripts A, B, Z */
	u_char	*scriptb0;
	u_char	*scriptz0;
	u32	scripta_ba;	/* Actual scripts A, B, Z */
	u32	scriptb_ba;	/* 32 bit bus addresses. */
	u32	scriptz_ba;
	u_short	scripta_sz;	/* Actual size of script A, B, Z */
	u_short	scriptb_sz;
	u_short	scriptz_sz;

	/*
	 * Bus addresses, setup and patch methods for
	 * the selected firmware.
	 */
	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses */
	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses */
	struct sym_fwz_ba fwz_bas;	/* Useful SCRIPTZ bus addresses */
	void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
	void (*fw_patch)(struct Scsi_Host *);
	char *fw_name;

	/*
	 * General controller parameters and configuration.
	 */
	u_int	features;	/* Chip features map */
	u_char	myaddr;		/* SCSI id of the adapter */
	u_char	maxburst;	/* log base 2 of dwords burst */
	u_char	maxwide;	/* Maximum transfer width */
	u_char	minsync;	/* Min sync period factor (ST) */
	u_char	maxsync;	/* Max sync period factor (ST) */
	u_char	maxoffs;	/* Max scsi offset (ST) */
	u_char	minsync_dt;	/* Min sync period factor (DT) */
	u_char	maxsync_dt;	/* Max sync period factor (DT) */
	u_char	maxoffs_dt;	/* Max scsi offset (DT) */
	u_char	multiplier;	/* Clock multiplier (1,2,4) */
	u_char	clock_divn;	/* Number of clock divisors */
	u32	clock_khz;	/* SCSI clock frequency in KHz */
	u32	pciclk_khz;	/* Estimated PCI clock in KHz */
	/*
	 * Start queue management.
	 * It is filled up by the host processor and accessed by the
	 * SCRIPTS processor in order to start SCSI commands.
	 */
	volatile		/* Prevent code optimizations */
	u32	*squeue;	/* Start queue virtual address */
	u32	squeue_ba;	/* Start queue BUS address */
	u_short	squeueput;	/* Next free slot of the queue */
	u_short	actccbs;	/* Number of allocated CCBs */

	/*
	 * Command completion queue.
	 * It is the same size as the start queue to avoid overflow.
	 */
	u_short	dqueueget;	/* Next position to scan */
	volatile		/* Prevent code optimizations */
	u32	*dqueue;	/* Completion (done) queue */
	u32	dqueue_ba;	/* Done queue BUS address */

	/*
	 * Miscellaneous buffers accessed by the scripts-processor.
	 * They shall be DWORD aligned, because they may be read or
	 * written with a script command.
	 */
	u_char	msgout[8];	/* Buffer for MESSAGE OUT */
	u_char	msgin [8];	/* Buffer for MESSAGE IN */
	u32	lastmsg;	/* Last SCSI message sent */
	u32	scratch;	/* Scratch for SCSI receive */
				/* Also used for cache test */
	/*
	 * Miscellaneous configuration and status parameters.
	 */
	u_char	usrflags;	/* Miscellaneous user flags */
	u_char	scsi_mode;	/* Current SCSI BUS mode */
	u_char	verbose;	/* Verbosity for this controller */

	/*
	 * CCB lists and queue.
	 */
	struct sym_ccb **ccbh;			/* CCBs hashed by DSA value */
						/* CCB_HASH_SIZE lists of CCBs */
	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs */
	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs */

	/*
	 * During error handling and/or recovery,
	 * active CCBs that are to be completed with
	 * error or requeued are moved from the busy_ccbq
	 * to the comp_ccbq prior to completion.
	 */
	SYM_QUEHEAD	comp_ccbq;

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	SYM_QUEHEAD	dummy_ccbq;
#endif

	/*
	 * IMMEDIATE ARBITRATION (IARB) control.
	 *
	 * We keep track in 'last_cp' of the last CCB that has been
	 * queued to the SCRIPTS processor and clear 'last_cp' when
	 * this CCB completes. If last_cp is not zero at the moment
	 * we queue a new CCB, we set a flag in 'last_cp' that is
	 * used by the SCRIPTS as a hint for setting IARB.
	 * We do not set more than 'iarb_max' consecutive hints for
	 * IARB in order to leave devices a chance to reselect.
	 * By the way, any non-zero value of 'iarb_max' is unfair. :)
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	u_short		iarb_max;	/* Max. # consecutive IARB hints */
	u_short		iarb_count;	/* Actual # of these hints */
	struct sym_ccb *last_cp;
#endif

	/*
	 * Command abort handling.
	 * We need to synchronize tightly with the SCRIPTS
	 * processor in order to handle things correctly.
	 */
	u_char		abrt_msg[4];	/* Message to send buffer */
	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it */
	struct sym_tblsel  abrt_sel;	/* Sync params for selection */
	u_char		istat_sem;	/* Tells the chip to stop (SEM) */

	/*
	 * 64 bit DMA handling.
	 */
#if	SYM_CONF_DMA_ADDRESSING_MODE != 0
	u_char	use_dac;		/* Use PCI DAC cycles */
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
	u_char	dmap_dirty;		/* Dma segments registers dirty */
	u32	dmap_bah[SYM_DMAP_SIZE];/* Segment registers map */
#endif
#endif
};

#if SYM_CONF_DMA_ADDRESSING_MODE == 0
#define use_dac(np)	0
#define set_dac(np)	do { } while (0)
#else
#define use_dac(np)	(np)->use_dac
#define set_dac(np)	(np)->use_dac = 1
#endif

#define HCB_BA(np, lbl)	(np->hcb_ba + offsetof(struct sym_hcb, lbl))


/*
 * FIRMWARES (sym_fw.c)
 */
struct sym_fw * sym_find_firmware(struct sym_chip *chip);
void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);

/*
 * Driver methods called from O/S specific code.
 */
char *sym_driver_name(void);
void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
#else
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
#endif
void sym_start_up(struct Scsi_Host *, int reason);
irqreturn_t sym_interrupt(struct Scsi_Host *);
int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
int sym_reset_scsi_target(struct sym_hcb *np, int target);
void sym_hcb_free(struct sym_hcb *np);
int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);

/*
 * Build a scatter/gather entry.
 *
 * For 64 bit systems, we use the 8 upper bits of the size field
 * to provide bus address bits 32-39 to the SCRIPTS processor.
 * This allows the 895A, 896, 1010 to address up to 1 TB of memory.
 */

#if   SYM_CONF_DMA_ADDRESSING_MODE == 0
#define DMA_DAC_MASK	DMA_BIT_MASK(32)
#define sym_build_sge(np, data, badd, len)	\
do {						\
	(data)->addr = cpu_to_scr(badd);	\
	(data)->size = cpu_to_scr(len);		\
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
#define DMA_DAC_MASK	DMA_BIT_MASK(40)
#define sym_build_sge(np, data, badd, len)				\
do {									\
	(data)->addr = cpu_to_scr(badd);				\
	(data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);	\
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
#define DMA_DAC_MASK	DMA_BIT_MASK(64)
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
static inline void
sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
{
	u32 h = (badd>>32);
	int s = (h&SYM_DMAP_MASK);

	if (h != np->dmap_bah[s])
		goto bad;
good:
	(data)->addr = cpu_to_scr(badd);
	(data)->size = cpu_to_scr((s<<24) + len);
	return;
bad:
	s = sym_lookup_dmap(np, h, s);
	goto good;
}
#else
#error "Unsupported DMA addressing mode"
#endif
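
/*
 * Illustrative sketch only (not part of the original header): how
 * sym_build_sge() is meant to be fed from an already DMA-mapped
 * scatterlist, filling the tail of cp->phys.data[] the way the
 * SCRIPTS expect to walk it.  This is a simplified, hypothetical
 * version of what the O/S glue code does; kept under "#if 0" so it
 * is never compiled.
 */
#if 0
static int sym_example_fill_data_sge(struct sym_hcb *np, struct sym_ccb *cp,
				     struct scatterlist *sgl, int use_sg)
{
	struct sym_tblmove *data;
	struct scatterlist *sg;
	int i;

	if (use_sg > SYM_CONF_MAX_SG)
		return -1;

	data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
	for_each_sg(sgl, sg, use_sg, i) {
		sym_build_sge(np, &data[i], sg_dma_address(sg), sg_dma_len(sg));
		cp->data_len += sg_dma_len(sg);
	}
	cp->segments = use_sg;
	return 0;
}
#endif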

/*
 * MEMORY ALLOCATOR.
 */

#define sym_get_mem_cluster()	\
	(void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
#define sym_free_mem_cluster(p)	\
	free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)

/*
 * Link between free memory chunks of a given size.
 */
typedef struct sym_m_link {
	struct sym_m_link *next;
} *m_link_p;

/*
 * Virtual to bus physical translation for a given cluster.
 * Such a structure is only useful with DMA abstraction.
 */
typedef struct sym_m_vtob {	/* Virtual to Bus address translation */
	struct sym_m_vtob *next;
	void	*vaddr;		/* Virtual address */
	dma_addr_t baddr;	/* Bus physical address */
} *m_vtob_p;

/* Hash this stuff a bit to speed up translations */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)

/*
 * Memory pool of a given kind.
 * Ideally, we want to use:
 * 1) 1 pool for memory we do not need to involve in DMA.
 * 2) The same pool for controllers that require the same DMA
 *    constraints and features.
 * The OS specific m_pool_ident_t thing and the sym_m_pool_match()
 * method are expected to tell the driver which pool to use.
 */
typedef struct sym_m_pool {
	m_pool_ident_t	dev_dmat;	/* Identifies the pool (see above) */
	void * (*get_mem_cluster)(struct sym_m_pool *);
#ifdef	SYM_MEM_FREE_UNUSED
	void (*free_mem_cluster)(struct sym_m_pool *, void *);
#endif
#define M_GET_MEM_CLUSTER()	mp->get_mem_cluster(mp)
#define M_FREE_MEM_CLUSTER(p)	mp->free_mem_cluster(mp, p)
	int nump;
	m_vtob_p vtob[VTOB_HASH_SIZE];
	struct sym_m_pool *next;
	struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
} *m_pool_p;
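
/*
 * Illustrative sketch only (not part of the original header): how the
 * per-pool VTOB hash is meant to be searched to translate a driver
 * virtual address into the bus address of its cluster.  This is a
 * simplified, hypothetical version of the __vtobus() lookup, without
 * the locking and pool selection the real allocator code performs;
 * kept under "#if 0" so it is never compiled.
 */
#if 0
static inline dma_addr_t sym_example_pool_vtobus(m_pool_p mp, void *m)
{
	unsigned long a = (unsigned long)m & ~SYM_MEM_CLUSTER_MASK;	/* cluster base */
	m_vtob_p vp = mp->vtob[VTOB_HASH_CODE(m)];

	while (vp && (unsigned long)vp->vaddr != a)	/* walk the hash chain */
		vp = vp->next;
	return vp ? vp->baddr + ((unsigned long)m - a) : 0;
}
#endif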

/*
 * Alloc, free and translate addresses to bus physical
 * for DMAable memory.
 */
void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);

/*
 * Verbs used by the driver code for DMAable memory handling.
 * The _uvptv_ macro avoids a nasty warning about pointer to volatile
 * being discarded.
 */
#define _uvptv_(p) ((void *)((u_long)(p)))

#define _sym_calloc_dma(np, l, n)	__sym_calloc_dma(np->bus_dmat, l, n)
#define _sym_mfree_dma(np, p, l, n)	\
	__sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
#define sym_calloc_dma(l, n)		_sym_calloc_dma(np, l, n)
#define sym_mfree_dma(p, l, n)		_sym_mfree_dma(np, p, l, n)
#define vtobus(p)			__vtobus(np->bus_dmat, _uvptv_(p))
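
/*
 * Illustrative sketch only (not part of the original header): the
 * usual allocate-then-translate pattern built on the verbs above.
 * Note that sym_calloc_dma() and vtobus() expand using an 'np'
 * pointer that must be in scope.  The helper and the "EXAMPLE_TBL"
 * name are hypothetical; kept under "#if 0" so it is never compiled.
 */
#if 0
static u32 *sym_example_alloc_table(struct sym_hcb *np, u32 *ba)
{
	u32 *tbl = sym_calloc_dma(256, "EXAMPLE_TBL");	/* DMAable memory for a 64-entry table */

	if (tbl)
		*ba = vtobus(tbl);	/* bus address the SCRIPTS processor can use */
	return tbl;			/* freed later with sym_mfree_dma(tbl, 256, "EXAMPLE_TBL") */
}
#endif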

/*
 * We have to provide the driver memory allocator with methods for
 * it to maintain virtual to bus physical address translations.
 */

#define sym_m_pool_match(mp_id1, mp_id2)	(mp_id1 == mp_id2)

static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
	void *vaddr = NULL;
	dma_addr_t baddr = 0;

	vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
			GFP_ATOMIC);
	if (vaddr) {
		vbp->vaddr = vaddr;
		vbp->baddr = baddr;
	}
	return vaddr;
}

static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
	dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
			vbp->baddr);
}

#endif /* SYM_HIPD_H */