/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#ifndef _HPTIOP_H_
#define _HPTIOP_H_

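/*
 * Message unit register layout used by the INTEL_BASED_IOP ("itl")
 * family: message address registers, doorbells, interrupt status/mask
 * pairs and the inbound/outbound request post queues.
 */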
struct hpt_iopmu_itl {
	__le32 resrved0[4];
	__le32 inbound_msgaddr0;
	__le32 inbound_msgaddr1;
	__le32 outbound_msgaddr0;
	__le32 outbound_msgaddr1;
	__le32 inbound_doorbell;
	__le32 inbound_intstatus;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intstatus;
	__le32 outbound_intmask;
	__le32 reserved1[2];
	__le32 inbound_queue;
	__le32 outbound_queue;
};

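/*
 * Encoding of entries posted to inbound_queue/outbound_queue: the low
 * bits carry a (shifted) request address while the top nibble is
 * reserved for host flag bits (IOPMU_QUEUE_MASK_HOST_BITS);
 * 0xffffffff marks an empty queue slot.
 */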
#define IOPMU_QUEUE_EMPTY            0xffffffff
#define IOPMU_QUEUE_MASK_HOST_BITS   0xf0000000
#define IOPMU_QUEUE_ADDR_HOST_BIT    0x80000000
#define IOPMU_QUEUE_REQUEST_SIZE_BIT    0x40000000
#define IOPMU_QUEUE_REQUEST_RESULT_BIT   0x40000000

#define IOPMU_OUTBOUND_INT_MSG0      1
#define IOPMU_OUTBOUND_INT_MSG1      2
#define IOPMU_OUTBOUND_INT_DOORBELL  4
#define IOPMU_OUTBOUND_INT_POSTQUEUE 8
#define IOPMU_OUTBOUND_INT_PCI       0x10

#define IOPMU_INBOUND_INT_MSG0       1
#define IOPMU_INBOUND_INT_MSG1       2
#define IOPMU_INBOUND_INT_DOORBELL   4
#define IOPMU_INBOUND_INT_ERROR      8
#define IOPMU_INBOUND_INT_POSTQUEUE  0x10

#define MVIOP_QUEUE_LEN  512

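/*
 * Message unit of the MV_BASED_IOP (Marvell based) family: a pair of
 * MVIOP_QUEUE_LEN deep circular queues of 64-bit entries, each managed
 * by head/tail indices, plus one message register per direction.
 */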
struct hpt_iopmu_mv {
	__le32 inbound_head;
	__le32 inbound_tail;
	__le32 outbound_head;
	__le32 outbound_tail;
	__le32 inbound_msg;
	__le32 outbound_msg;
	__le32 reserve[10];
	__le64 inbound_q[MVIOP_QUEUE_LEN];
	__le64 outbound_q[MVIOP_QUEUE_LEN];
};

struct hpt_iopmv_regs {
	__le32 reserved[0x20400 / 4];
	__le32 inbound_doorbell;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intmask;
};

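/*
 * Register map of the MVFREY_BASED_IOP (Marvell Frey based) family.
 * The reserved arrays are sized from the register offsets so that each
 * field lands at its hardware offset, e.g. inbound_base at 0x4000 and
 * isr_cause at 0x4088.
 */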
#pragma pack(1)
struct hpt_iopmu_mvfrey {
	__le32 reserved0[(0x4000 - 0) / 4];
	__le32 inbound_base;
	__le32 inbound_base_high;
	__le32 reserved1[(0x4018 - 0x4008) / 4];
	__le32 inbound_write_ptr;
	__le32 reserved2[(0x402c - 0x401c) / 4];
	__le32 inbound_conf_ctl;
	__le32 reserved3[(0x4050 - 0x4030) / 4];
	__le32 outbound_base;
	__le32 outbound_base_high;
	__le32 outbound_shadow_base;
	__le32 outbound_shadow_base_high;
	__le32 reserved4[(0x4088 - 0x4060) / 4];
	__le32 isr_cause;
	__le32 isr_enable;
	__le32 reserved5[(0x1020c - 0x4090) / 4];
	__le32 pcie_f0_int_enable;
	__le32 reserved6[(0x10400 - 0x10210) / 4];
	__le32 f0_to_cpu_msg_a;
	__le32 reserved7[(0x10420 - 0x10404) / 4];
	__le32 cpu_to_f0_msg_a;
	__le32 reserved8[(0x10480 - 0x10424) / 4];
	__le32 f0_doorbell;
	__le32 f0_doorbell_enable;
};

struct mvfrey_inlist_entry {
	dma_addr_t addr;
	__le32 intrfc_len;
	__le32 reserved;
};

struct mvfrey_outlist_entry {
	__le32 val;
};
#pragma pack()

#define MVIOP_MU_QUEUE_ADDR_HOST_MASK   (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT    4

#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32  0xffffffff
#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT   1
#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2

#define MVIOP_MU_INBOUND_INT_MSG        1
#define MVIOP_MU_INBOUND_INT_POSTQUEUE  2
#define MVIOP_MU_OUTBOUND_INT_MSG       1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2

#define CL_POINTER_TOGGLE        0x00004000
#define CPU_TO_F0_DRBL_MSG_BIT   0x02000000

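/*
 * Inbound (host-to-iop) message codes fit in the low byte; outbound
 * (iop-to-host) register/unregister/revalidate notifications each
 * occupy a 256-entry range, presumably with the device number carried
 * in the low byte of the message value.
 */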
enum hpt_iopmu_message {
	/* host-to-iop messages */
	IOPMU_INBOUND_MSG0_NOP = 0,
	IOPMU_INBOUND_MSG0_RESET,
	IOPMU_INBOUND_MSG0_FLUSH,
	IOPMU_INBOUND_MSG0_SHUTDOWN,
	IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_RESET_COMM,
	IOPMU_INBOUND_MSG0_MAX = 0xff,
	/* iop-to-host messages */
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
};

struct hpt_iop_request_header {
	__le32 size;
	__le32 type;
	__le32 flags;
	__le32 result;
	__le32 context; /* host context */
	__le32 context_hi32;
};

#define IOP_REQUEST_FLAG_SYNC_REQUEST 1
#define IOP_REQUEST_FLAG_BIST_REQUEST 2
#define IOP_REQUEST_FLAG_REMAPPED     4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */

enum hpt_iop_request_type {
	IOP_REQUEST_TYPE_GET_CONFIG = 0,
	IOP_REQUEST_TYPE_SET_CONFIG,
	IOP_REQUEST_TYPE_BLOCK_COMMAND,
	IOP_REQUEST_TYPE_SCSI_COMMAND,
	IOP_REQUEST_TYPE_IOCTL_COMMAND,
	IOP_REQUEST_TYPE_MAX
};

enum hpt_iop_result_type {
	IOP_RESULT_PENDING = 0,
	IOP_RESULT_SUCCESS,
	IOP_RESULT_FAIL,
	IOP_RESULT_BUSY,
	IOP_RESULT_RESET,
	IOP_RESULT_INVALID_REQUEST,
	IOP_RESULT_BAD_TARGET,
	IOP_RESULT_CHECK_CONDITION,
};

struct hpt_iop_request_get_config {
	struct hpt_iop_request_header header;
	__le32 interface_version;
	__le32 firmware_version;
	__le32 max_requests;
	__le32 request_size;
	__le32 max_sg_count;
	__le32 data_transfer_length;
	__le32 alignment_mask;
	__le32 max_devices;
	__le32 sdram_size;
};

struct hpt_iop_request_set_config {
	struct hpt_iop_request_header header;
	__le32 iop_id;
	__le16 vbus_id;
	__le16 max_host_request_size;
	__le32 reserve[6];
};

struct hpt_iopsg {
	__le32 size;
	__le32 eot; /* non-zero: end of table */
	__le64 pci_address;
};

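/*
 * The block and SCSI command requests below end with sg_list[1]: only
 * the first scatter/gather element is declared, the remaining
 * hpt_iopsg descriptors follow it in the same request buffer (the
 * pre-C99 variable length trailing array idiom).
 */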
struct hpt_iop_request_block_command {
	struct hpt_iop_request_header header;
	u8     channel;
	u8     target;
	u8     lun;
	u8     pad1;
	__le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
	__le16 sectors;
	__le64 lba;
	struct hpt_iopsg sg_list[1];
};

#define IOP_BLOCK_COMMAND_READ     1
#define IOP_BLOCK_COMMAND_WRITE    2
#define IOP_BLOCK_COMMAND_VERIFY   3
#define IOP_BLOCK_COMMAND_FLUSH    4
#define IOP_BLOCK_COMMAND_SHUTDOWN 5

struct hpt_iop_request_scsi_command {
	struct hpt_iop_request_header header;
	u8     channel;
	u8     target;
	u8     lun;
	u8     pad1;
	u8     cdb[16];
	__le32 dataxfer_length;
	struct hpt_iopsg sg_list[1];
};

struct hpt_iop_request_ioctl_command {
	struct hpt_iop_request_header header;
	__le32 ioctl_code;
	__le32 inbuf_size;
	__le32 outbuf_size;
	__le32 bytes_returned;
	u8     buf[1];
	/* out data should be put at buf[(inbuf_size+3)&~3] */
};

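/*
 * Per-request host bookkeeping: entries are chained on the hba's free
 * list via next, and track the virtual and shifted physical address of
 * the request buffer plus the owning scsi_cmnd.
 */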
#define HPTIOP_MAX_REQUESTS  256u

struct hptiop_request {
	struct hptiop_request *next;
	void                  *req_virt;
	u32                   req_shifted_phy;
	struct scsi_cmnd      *scp;
	int                   index;
};

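/*
 * Per-command DMA mapping state, overlaid on the scsi_cmnd's SCp
 * scratch area via the HPT_SCP() macro below.
 */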
struct hpt_scsi_pointer {
	int mapped;
	int sgcnt;
	dma_addr_t dma_handle;
};

#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)

enum hptiop_family {
	UNKNOWN_BASED_IOP,
	INTEL_BASED_IOP,
	MV_BASED_IOP,
	MVFREY_BASED_IOP
};

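/*
 * Per-adapter state. The union u holds the family specific register
 * mappings and queue bookkeeping; which member is valid is determined
 * by ops->family.
 */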
struct hptiop_hba {
	struct hptiop_adapter_ops *ops;
	union {
		struct {
			struct hpt_iopmu_itl __iomem *iop;
			void __iomem *plx;
		} itl;
		struct {
			struct hpt_iopmv_regs *regs;
			struct hpt_iopmu_mv __iomem *mu;
			void *internal_req;
			dma_addr_t internal_req_phy;
		} mv;
		struct {
			struct hpt_iop_request_get_config __iomem *config;
			struct hpt_iopmu_mvfrey __iomem *mu;

			int internal_mem_size;
			struct hptiop_request internal_req;
			int list_count;
			struct mvfrey_inlist_entry *inlist;
			dma_addr_t inlist_phy;
			__le32 inlist_wptr;
			struct mvfrey_outlist_entry *outlist;
			dma_addr_t outlist_phy;
			__le32 *outlist_cptr; /* copy pointer shadow */
			dma_addr_t outlist_cptr_phy;
			__le32 outlist_rptr;
		} mvfrey;
	} u;

	struct Scsi_Host *host;
	struct pci_dev *pcidev;

	/* IOP config info */
	u32     interface_version;
	u32     firmware_version;
	u32     sdram_size;
	u32     max_devices;
	u32     max_requests;
	u32     max_request_size;
	u32     max_sg_descriptors;

	u32     req_size; /* host-allocated request buffer size */

	u32     iopintf_v2: 1;
	u32     initialized: 1;
	u32     msg_done: 1;

	struct hptiop_request *req_list;
	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];

	/* used to free allocated dma area */
	void        *dma_coherent[HPTIOP_MAX_REQUESTS];
	dma_addr_t  dma_coherent_handle[HPTIOP_MAX_REQUESTS];

	atomic_t    reset_count;
	atomic_t    resetting;

	wait_queue_head_t reset_wq;
	wait_queue_head_t ioctl_wq;
};

struct hpt_ioctl_k {
	struct hptiop_hba *hba;
	u32    ioctl_code;
	u32    inbuf_size;
	u32    outbuf_size;
	void   *inbuf;
	void   *outbuf;
	u32    *bytes_returned;
	void (*done)(struct hpt_ioctl_k *);
	int    result; /* HPT_IOCTL_RESULT_ */
};

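/*
 * Family specific operations: one instance per hptiop_family,
 * abstracting BAR mapping, interrupt control, config exchange and
 * request/message posting for that controller type.
 */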
struct hptiop_adapter_ops {
	enum hptiop_family family;
	int  (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
	int  (*internal_memalloc)(struct hptiop_hba *hba);
	int  (*internal_memfree)(struct hptiop_hba *hba);
	int  (*map_pci_bar)(struct hptiop_hba *hba);
	void (*unmap_pci_bar)(struct hptiop_hba *hba);
	void (*enable_intr)(struct hptiop_hba *hba);
	void (*disable_intr)(struct hptiop_hba *hba);
	int  (*get_config)(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config);
	int  (*set_config)(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config);
	int  (*iop_intr)(struct hptiop_hba *hba);
	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
	int  hw_dma_bit_mask;
	int  (*reset_comm)(struct hptiop_hba *hba);
	__le64  host_phy_flag;
};

#define HPT_IOCTL_RESULT_OK         0
#define HPT_IOCTL_RESULT_FAILED     (-1)

#if 0
#define dprintk(fmt, args...) do { printk(fmt, ##args); } while (0)
#else
#define dprintk(fmt, args...)
#endif

#endif /* _HPTIOP_H_ */