// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport driver for Tsi721 PCI Express-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Chul Kim <chul.kim@idt.com>
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"

#ifdef DEBUG
u32 tsi_dbg_level;
module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

static int pcie_mrrs = -1;
module_param(pcie_mrrs, int, S_IRUGO);
MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");

static u8 mbox_sel = 0x0f;
module_param(mbox_sel, byte, S_IRUGO);
MODULE_PARM_DESC(mbox_sel,
		 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
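
/*
 * Illustrative usage of the parameters above (not part of the original
 * source; the module name is assumed from the usual kernel build, where
 * this file is linked into tsi721_mport.ko):
 *
 *   modprobe tsi721_mport pcie_mrrs=2 mbox_sel=0x03
 *
 * would cap the PCIe Maximum Read Request Size at encoding 2 and enable
 * only mailboxes 0 and 1 for RIO messaging; mbox_sel is a bitmask with
 * one bit per mailbox, pcie_mrrs uses the standard PCIe encodings 0...5.
 */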

static DEFINE_SPINLOCK(tsi721_maint_lock);

static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);

/**
 * tsi721_lcread - read from local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to store the read value
 *
 * Generates a local SREP space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
			 int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	*data = ioread32(priv->regs + offset);

	return 0;
}

/**
 * tsi721_lcwrite - write into local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a local write into SREP configuration space. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
			  int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	iowrite32(data, priv->regs + offset);

	return 0;
}

/**
 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
 *                    transactions using designated Tsi721 DMA channel.
 * @priv: pointer to tsi721 private data
 * @sys_size: RapidIO transport system size
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read from or written to
 * @do_wr: Operation flag (1 == MAINT_WR)
 *
 * Generates a RapidIO maintenance transaction (Read or Write).
 * Returns %0 on success or %-EINVAL/%-EIO on failure.
 */
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
			u16 destid, u8 hopcount, u32 offset, int len,
			u32 *data, int do_wr)
{
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
	struct tsi721_dma_desc *bd_ptr;
	u32 rd_count, swr_ptr, ch_stat;
	unsigned long flags;
	int i, err = 0;
	u32 op = do_wr ? MAINT_WR : MAINT_RD;

	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
		return -EINVAL;

	spin_lock_irqsave(&tsi721_maint_lock, flags);

	bd_ptr = priv->mdma.bd_base;

	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);

	/* Initialize DMA descriptor */
	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
	bd_ptr[0].raddr_hi = 0;
	if (do_wr)
		bd_ptr[0].data[0] = cpu_to_be32p(data);
	else
		bd_ptr[0].data[0] = 0xffffffff;

	mb();

	/* Start DMA operation */
	iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
	ioread32(regs + TSI721_DMAC_DWRCNT);
	i = 0;

	/* Wait until DMA transfer is finished */
	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
							& TSI721_DMAC_STS_RUN) {
		udelay(1);
		if (++i >= 5000000) {
			tsi_debug(MAINT, &priv->pdev->dev,
				"DMA[%d] read timeout ch_status=%x",
				priv->mdma.ch_id, ch_stat);
			if (!do_wr)
				*data = 0xffffffff;
			err = -EIO;
			goto err_out;
		}
	}

	if (ch_stat & TSI721_DMAC_STS_ABORT) {
		/* If DMA operation aborted due to error,
		 * reinitialize DMA channel
		 */
		tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x",
			  ch_stat);
		tsi_debug(MAINT, &priv->pdev->dev,
			  "OP=%d : destid=%x hc=%x off=%x",
			  do_wr ? MAINT_WR : MAINT_RD,
			  destid, hopcount, offset);
		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
		udelay(10);
		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
		udelay(1);
		if (!do_wr)
			*data = 0xffffffff;
		err = -EIO;
		goto err_out;
	}

	if (!do_wr)
		*data = be32_to_cpu(bd_ptr[0].data[0]);

	/*
	 * Update descriptor status FIFO RD pointer.
	 * NOTE: Skipping check and clear FIFO entries because we are waiting
	 * for transfer to be completed.
	 */
	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);

err_out:
	spin_unlock_irqrestore(&tsi721_maint_lock, flags);

	return err;
}
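
/*
 * Sketch of the DTYPE2 descriptor packing used above, derived from the
 * field assignments in tsi721_maint_dma() (bit positions restate that
 * code, not an independent datasheet check):
 *
 *   type_id  = (DTYPE2 << 29) | (op << 19) | destid
 *   bcount   = (sys_size << 26) | 0x04      - 4-byte maintenance payload
 *   raddr_lo = (hopcount << 24) | offset    - config-space offset in target
 *   raddr_hi = 0
 *   data[0]  = big-endian payload for MAINT_WR; 0xffffffff placeholder
 *              for MAINT_RD, overwritten by the bridge on completion
 */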

/**
 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
 *                    using Tsi721 BDMA engine.
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read into
 *
 * Generates a RapidIO maintenance read transaction.
 * Returns %0 on success or %-EINVAL/%-EIO on failure.
 */
static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, data, 0);
}

/**
 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
 *                     using Tsi721 BDMA engine
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a RapidIO maintenance write transaction.
 * Returns %0 on success or %-EINVAL/%-EIO on failure.
 */
static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
			 u8 hopcount, u32 offset, int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 temp = data;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, &temp, 1);
}

/**
 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
 * @priv:  tsi721 device private structure
 *
 * Handles inbound port-write interrupts. Copies PW message from an internal
 * buffer into PW message FIFO and schedules deferred routine to process
 * queued messages.
 */
static int
tsi721_pw_handler(struct tsi721_device *priv)
{
	u32 pw_stat;
	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];

	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);

	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));

		/* Queue PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		spin_lock(&priv->pw_fifo_lock);
		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
			kfifo_in(&priv->pw_fifo, pw_buf,
						TSI721_RIO_PW_MSG_SIZE);
		else
			priv->pw_discard_count++;
		spin_unlock(&priv->pw_fifo_lock);
	}

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);

	schedule_work(&priv->pw_work);

	return 0;
}

static void tsi721_pw_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    pw_work);
	union rio_pw_msg pwmsg;

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg,
			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler(&priv->mport, &pwmsg);
	}
}

/**
 * tsi721_pw_enable - enable/disable port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
static int tsi721_pw_enable(struct rio_mport *mport, int enable)
{
	struct tsi721_device *priv = mport->priv;
	u32 rval;

	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);

	if (enable)
		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
	else
		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);
	/* Update enable bits */
	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);

	return 0;
}

/**
 * tsi721_dsend - Send a RapidIO doorbell
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell
 *
 * Sends a RapidIO doorbell message. Always returns %0.
 */
static int tsi721_dsend(struct rio_mport *mport, int index,
			u16 destid, u16 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 offset;

	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
		 (destid << 2);

	tsi_debug(DBELL, &priv->pdev->dev,
		  "Send Doorbell 0x%04x to destID 0x%x", data, destid);
	iowrite16be(data, priv->odb_base + offset);

	return 0;
}
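
/*
 * Worked example for the outbound doorbell offset computed above
 * (illustrative numbers): with a large system size (RIO_TT_CODE_16)
 * and destid = 0x1234,
 *
 *   offset = (RIO_TT_CODE_16 << 18) | (0x1234 << 2)
 *
 * i.e. the transport-type code selects the doorbell region and the
 * destination ID indexes a 4-byte slot within it; the 16-bit info field
 * is then written big-endian at priv->odb_base + offset.
 */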

/**
 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
 * @priv: tsi721 device-specific data structure
 *
 * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
 * buffer into DB message FIFO and schedules deferred routine to process
 * queued DBs.
 */
static int
tsi721_dbell_handler(struct tsi721_device *priv)
{
	u32 regval;

	/* Disable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval &= ~TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	schedule_work(&priv->idb_work);

	return 0;
}

static void tsi721_db_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    idb_work);
	struct rio_mport *mport;
	struct rio_dbell *dbell;
	int found = 0;
	u32 wr_ptr, rd_ptr;
	u64 *idb_entry;
	u32 regval;
	union {
		u64 msg;
		u8  bytes[8];
	} idb;

	/*
	 * Process queued inbound doorbells
	 */
	mport = &priv->mport;

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;

	while (wr_ptr != rd_ptr) {
		idb_entry = (u64 *)(priv->idb_base +
					(TSI721_IDB_ENTRY_SIZE * rd_ptr));
		rd_ptr++;
		rd_ptr %= IDB_QSIZE;
		idb.msg = *idb_entry;
		*idb_entry = 0;

		/* Process one doorbell */
		list_for_each_entry(dbell, &mport->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
				found = 1;
				break;
			}
		}

		if (found) {
			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		} else {
			tsi_debug(DBELL, &priv->pdev->dev,
				  "spurious IDB sid %2.2x tid %2.2x info %4.4x",
				  DBELL_SID(idb.bytes), DBELL_TID(idb.bytes),
				  DBELL_INF(idb.bytes));
		}

		wr_ptr = ioread32(priv->regs +
				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	}

	iowrite32(rd_ptr & (IDB_QSIZE - 1),
		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	/* Re-enable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval |= TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	if (wr_ptr != rd_ptr)
		schedule_work(&priv->idb_work);
}
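
/*
 * IDB queue walk-through (illustrative; assumes IDB_QSIZE is a power of
 * two, e.g. 512 as defined in tsi721.h): with rd_ptr = 510 and wr_ptr = 1,
 * entries 510, 511 and 0 are pending. The "rd_ptr++; rd_ptr %= IDB_QSIZE"
 * pair above implements exactly this wrap, and the final write of
 * rd_ptr & (IDB_QSIZE - 1) relies on the power-of-two queue size.
 */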

/**
 * tsi721_irqhandler - Tsi721 interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 *
 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
 * interrupt events and calls an event-specific handler(s).
 */
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
	struct tsi721_device *priv = (struct tsi721_device *)ptr;
	u32 dev_int;
	u32 dev_ch_int;
	u32 intval;
	u32 ch_inte;

	/* For MSI mode disable all device-level interrupts */
	if (priv->flags & TSI721_USING_MSI)
		iowrite32(0, priv->regs + TSI721_DEV_INTE);

	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
	if (!dev_int)
		return IRQ_NONE;

	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);

	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
		/* Service SR2PC Channel interrupts */
		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
			/* Service Inbound Doorbell interrupt */
			intval = ioread32(priv->regs +
						TSI721_SR_CHINT(IDB_QUEUE));
			if (intval & TSI721_SR_CHINT_IDBQRCV)
				tsi721_dbell_handler(priv);
			else
				tsi_info(&priv->pdev->dev,
					"Unsupported SR_CH_INT %x", intval);

			/* Clear interrupts */
			iowrite32(intval,
				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
		}
	}

	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
		int ch;

		/*
		 * Service channel interrupts from Messaging Engine
		 */

		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
			/* Disable signaled IB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Inbound Message interrupt for each MBOX
			 */
			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
					continue;
				tsi721_imsg_handler(priv, ch);
			}
		}

		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
			/* Disable signaled OB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Outbound Message interrupts for each MBOX
			 */

			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
					continue;
				tsi721_omsg_handler(priv, ch);
			}
		}
	}

	if (dev_int & TSI721_DEV_INT_SRIO) {
		/* Service SRIO MAC interrupts */
		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
			tsi721_pw_handler(priv);
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	if (dev_int & TSI721_DEV_INT_BDMA_CH) {
		int ch;

		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
			tsi_debug(DMA, &priv->pdev->dev,
				  "IRQ from DMA channel 0x%08x", dev_ch_int);

			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
					continue;
				tsi721_bdma_handler(&priv->bdma[ch]);
			}
		}
	}
#endif

	/* For MSI mode re-enable device-level interrupts */
	if (priv->flags & TSI721_USING_MSI) {
		dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
		iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
	}

	return IRQ_HANDLED;
}

static void tsi721_interrupts_init(struct tsi721_device *priv)
{
	u32 intr;

	/* Enable IDB interrupts */
	iowrite32(TSI721_SR_CHINT_ALL,
		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	iowrite32(TSI721_SR_CHINT_IDBQRCV,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	/* Enable SRIO MAC interrupts */
	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
		priv->regs + TSI721_RIO_EM_DEV_INT_EN);

	/* Enable interrupts from channels in use */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
		(TSI721_INT_BDMA_CHAN_M &
		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
#else
	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
#endif
	iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);

	if (priv->flags & TSI721_USING_MSIX)
		intr = TSI721_DEV_INT_SRIO;
	else
		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;

	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
	ioread32(priv->regs + TSI721_DEV_INTE);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 *
 * Handles outbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = (struct tsi721_device *)ptr;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
	tsi721_omsg_handler(priv, mbox);
	return IRQ_HANDLED;
}

/**
 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 *
 * Handles inbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = (struct tsi721_device *)ptr;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
	tsi721_imsg_handler(priv, mbox + 4);
	return IRQ_HANDLED;
}

/**
 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 *
 * Handles Tsi721 interrupts from SRIO MAC.
 */
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = (struct tsi721_device *)ptr;
	u32 srio_int;

	/* Service SRIO MAC interrupts */
	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
		tsi721_pw_handler(priv);

	return IRQ_HANDLED;
}

/**
 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 *
 * Handles Tsi721 interrupts from SR2PC Channel.
 * NOTE: At this moment services only one SR2PC channel associated with inbound
 * doorbells.
 */
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = (struct tsi721_device *)ptr;
	u32 sr_ch_int;

	/* Service Inbound DB interrupt from SR2PC channel */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
		tsi721_dbell_handler(priv);

	/* Clear interrupts */
	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	/* Read back to ensure that interrupt was cleared */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));

	return IRQ_HANDLED;
}

/**
 * tsi721_request_msix - register interrupt service for MSI-X mode.
 * @priv: tsi721 device-specific data structure
 *
 * Registers MSI-X interrupt service routines for interrupts that are active
 * immediately after mport initialization. Messaging interrupt service routines
 * should be registered during corresponding open requests.
 */
static int tsi721_request_msix(struct tsi721_device *priv)
{
	int err = 0;

	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
			tsi721_sr2pc_ch_msix, 0,
			priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);
	if (err)
		return err;

	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
			tsi721_srio_msix, 0,
			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);
	if (err) {
		free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
		return err;
	}

	return 0;
}

/**
 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
 * @priv: pointer to tsi721 private data
 *
 * Configures MSI-X support for Tsi721. Supports only an exact number
 * of requested vectors.
 */
static int tsi721_enable_msix(struct tsi721_device *priv)
{
	struct msix_entry entries[TSI721_VECT_MAX];
	int err;
	int i;

	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;

	/*
	 * Initialize MSI-X entries for Messaging Engine:
	 * this driver supports four RIO mailboxes (inbound and outbound)
	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
	 * offset +4 is added to IB MBOX number.
	 */
	for (i = 0; i < RIO_MAX_MBOX; i++) {
		entries[TSI721_VECT_IMB0_RCV + i].entry =
					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
		entries[TSI721_VECT_IMB0_INT + i].entry =
					TSI721_MSIX_IMSG_INT(i + 4);
		entries[TSI721_VECT_OMB0_DONE + i].entry =
					TSI721_MSIX_OMSG_DONE(i);
		entries[TSI721_VECT_OMB0_INT + i].entry =
					TSI721_MSIX_OMSG_INT(i);
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	/*
	 * Initialize MSI-X entries for Block DMA Engine:
	 * this driver supports TSI721_DMA_CHNUM DMA channels
	 * (one is reserved for SRIO maintenance transactions)
	 */
	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
		entries[TSI721_VECT_DMA0_DONE + i].entry =
					TSI721_MSIX_DMACH_DONE(i);
		entries[TSI721_VECT_DMA0_INT + i].entry =
					TSI721_MSIX_DMACH_INT(i);
	}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

	err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		tsi_err(&priv->pdev->dev,
			"Failed to enable MSI-X (err=%d)", err);
		return err;
	}

	/*
	 * Copy MSI-X vector information into tsi721 private structure
	 */
	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));

	for (i = 0; i < RIO_MAX_MBOX; i++) {
		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
				entries[TSI721_VECT_IMB0_RCV + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
				entries[TSI721_VECT_IMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
				entries[TSI721_VECT_OMB0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
				entries[TSI721_VECT_OMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
			 i, pci_name(priv->pdev));
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
				entries[TSI721_VECT_DMA0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
				entries[TSI721_VECT_DMA0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
			 i, pci_name(priv->pdev));
	}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

	return 0;
}
#endif /* CONFIG_PCI_MSI */

static int tsi721_request_irq(struct tsi721_device *priv)
{
	int err;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX)
		err = tsi721_request_msix(priv);
	else
#endif
		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
			  DRV_NAME, (void *)priv);

	if (err)
		tsi_err(&priv->pdev->dev,
			"Unable to allocate interrupt, err=%d", err);

	return err;
}

static void tsi721_free_irq(struct tsi721_device *priv)
{
#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
		free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv);
	} else
#endif
	free_irq(priv->pdev->irq, (void *)priv);
}

static int
tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar,
		 u32 size, int *win_id)
{
	u64 win_base;
	u64 bar_base;
	u64 bar_end;
	u32 align;
	struct tsi721_ob_win *win;
	struct tsi721_ob_win *new_win = NULL;
	int new_win_idx = -1;
	int i = 0;

	bar_base = pbar->base;
	bar_end = bar_base + pbar->size;
	win_base = bar_base;
	align = size/TSI721_PC2SR_ZONES;

	while (i < TSI721_IBWIN_NUM) {
		for (i = 0; i < TSI721_IBWIN_NUM; i++) {
			if (!priv->ob_win[i].active) {
				if (new_win == NULL) {
					new_win = &priv->ob_win[i];
					new_win_idx = i;
				}
				continue;
			}

			/*
			 * If this window belongs to the current BAR check it
			 * for overlap
			 */
			win = &priv->ob_win[i];

			if (win->base >= bar_base && win->base < bar_end) {
				if (win_base < (win->base + win->size) &&
						(win_base + size) > win->base) {
					/* Overlap detected */
					win_base = win->base + win->size;
					win_base = ALIGN(win_base, align);
					break;
				}
			}
		}
	}

	if (win_base + size > bar_end)
		return -ENOMEM;

	if (!new_win) {
		tsi_err(&priv->pdev->dev, "OBW count tracking failed");
		return -EIO;
	}

	new_win->active = true;
	new_win->base = win_base;
	new_win->size = size;
	new_win->pbar = pbar;
	priv->obwin_cnt--;
	pbar->free -= size;
	*win_id = new_win_idx;
	return 0;
}
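
/*
 * Alignment example for the allocator above (illustrative numbers): a
 * requested window of size 2 MB is split across TSI721_PC2SR_ZONES (8)
 * zones, so align = 2 MB / 8 = 256 KB, and a candidate window base that
 * collides with an active window is bumped to the next 256 KB boundary
 * past the end of that window before the scan restarts.
 */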

static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart,
			u32 size, u32 flags, dma_addr_t *laddr)
{
	struct tsi721_device *priv = mport->priv;
	int i;
	struct tsi721_obw_bar *pbar;
	struct tsi721_ob_win *ob_win;
	int obw = -1;
	u32 rval;
	u64 rio_addr;
	u32 zsize;
	int ret = -ENOMEM;

	tsi_debug(OBW, &priv->pdev->dev,
		  "did=%d ra=0x%llx sz=0x%x", destid, rstart, size);

	if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1)))
		return -EINVAL;

	if (priv->obwin_cnt == 0)
		return -EBUSY;

	for (i = 0; i < 2; i++) {
		if (priv->p2r_bar[i].free >= size) {
			pbar = &priv->p2r_bar[i];
			ret = tsi721_obw_alloc(priv, pbar, size, &obw);
			if (!ret)
				break;
		}
	}

	if (ret)
		return ret;

	WARN_ON(obw == -1);
	ob_win = &priv->ob_win[obw];
	ob_win->destid = destid;
	ob_win->rstart = rstart;
	tsi_debug(OBW, &priv->pdev->dev,
		  "allocated OBW%d @%llx", obw, ob_win->base);

	/*
	 * Configure Outbound Window
	 */

	zsize = size/TSI721_PC2SR_ZONES;
	rio_addr = rstart;

	/*
	 * Program Address Translation Zones:
	 * This implementation uses all 8 zones associated with each window.
	 */
	for (i = 0; i < TSI721_PC2SR_ZONES; i++) {

		while (ioread32(priv->regs + TSI721_ZONE_SEL) &
			TSI721_ZONE_SEL_GO) {
			udelay(1);
		}

		rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) |
			TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR;
		iowrite32(rval, priv->regs + TSI721_LUT_DATA0);
		rval = (u32)(rio_addr >> 32);
		iowrite32(rval, priv->regs + TSI721_LUT_DATA1);
		rval = destid;
		iowrite32(rval, priv->regs + TSI721_LUT_DATA2);

		rval = TSI721_ZONE_SEL_GO | (obw << 3) | i;
		iowrite32(rval, priv->regs + TSI721_ZONE_SEL);

		rio_addr += zsize;
	}

	iowrite32(TSI721_OBWIN_SIZE(size) << 8,
		  priv->regs + TSI721_OBWINSZ(obw));
	iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw));
	iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN,
		  priv->regs + TSI721_OBWINLB(obw));

	*laddr = ob_win->base;
	return 0;
}
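
/*
 * Zone programming sketch (restating the loop above): each of the 8 zones
 * covers zsize = size/TSI721_PC2SR_ZONES bytes of the window. For zone i
 * of window obw the LUT is loaded with
 *
 *   LUT_DATA0 = low 32 bits of rio_addr | NREAD | NWR  (request types)
 *   LUT_DATA1 = high 32 bits of rio_addr
 *   LUT_DATA2 = destid
 *
 * and committed by writing ZONE_SEL_GO | (obw << 3) | i to ZONE_SEL,
 * after which rio_addr advances by zsize for the next zone.
 */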

static void tsi721_unmap_outb_win(struct rio_mport *mport,
				  u16 destid, u64 rstart)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_ob_win *ob_win;
	int i;

	tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart);

	for (i = 0; i < TSI721_OBWIN_NUM; i++) {
		ob_win = &priv->ob_win[i];

		if (ob_win->active &&
		    ob_win->destid == destid && ob_win->rstart == rstart) {
			tsi_debug(OBW, &priv->pdev->dev,
				  "free OBW%d @%llx", i, ob_win->base);
			ob_win->active = false;
			iowrite32(0, priv->regs + TSI721_OBWINLB(i));
			ob_win->pbar->free += ob_win->size;
			priv->obwin_cnt++;
			break;
		}
	}
}

/**
 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
 * translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables SREP translation regions.
 */
static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
{
	int i, z;
	u32 rval;

	/* Disable all PC2SR translation windows */
	for (i = 0; i < TSI721_OBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_OBWINLB(i));

	/* Initialize zone lookup tables to avoid ECC errors on reads */
	iowrite32(0, priv->regs + TSI721_LUT_DATA0);
	iowrite32(0, priv->regs + TSI721_LUT_DATA1);
	iowrite32(0, priv->regs + TSI721_LUT_DATA2);

	for (i = 0; i < TSI721_OBWIN_NUM; i++) {
		for (z = 0; z < TSI721_PC2SR_ZONES; z++) {
			while (ioread32(priv->regs + TSI721_ZONE_SEL) &
				TSI721_ZONE_SEL_GO) {
				udelay(1);
			}
			rval = TSI721_ZONE_SEL_GO | (i << 3) | z;
			iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
		}
	}

	if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) {
		priv->obwin_cnt = 0;
		return;
	}

	priv->p2r_bar[0].free = priv->p2r_bar[0].size;
	priv->p2r_bar[1].free = priv->p2r_bar[1].size;

	for (i = 0; i < TSI721_OBWIN_NUM; i++)
		priv->ob_win[i].active = false;

	priv->obwin_cnt = TSI721_OBWIN_NUM;
}

1073*4882a593Smuzhiyun /**
1074*4882a593Smuzhiyun  * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
1075*4882a593Smuzhiyun  * @mport: RapidIO master port
1076*4882a593Smuzhiyun  * @lstart: Local memory space start address.
1077*4882a593Smuzhiyun  * @rstart: RapidIO space start address.
1078*4882a593Smuzhiyun  * @size: The mapping region size.
1079*4882a593Smuzhiyun  * @flags: Flags for mapping. 0 for using default flags.
1080*4882a593Smuzhiyun  *
1081*4882a593Smuzhiyun  * Return: 0 -- Success.
1082*4882a593Smuzhiyun  *
1083*4882a593Smuzhiyun  * This function will create the inbound mapping
1084*4882a593Smuzhiyun  * from rstart to lstart.
1085*4882a593Smuzhiyun  */
tsi721_rio_map_inb_mem(struct rio_mport * mport,dma_addr_t lstart,u64 rstart,u64 size,u32 flags)1086*4882a593Smuzhiyun static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
1087*4882a593Smuzhiyun 		u64 rstart, u64 size, u32 flags)
1088*4882a593Smuzhiyun {
1089*4882a593Smuzhiyun 	struct tsi721_device *priv = mport->priv;
1090*4882a593Smuzhiyun 	int i, avail = -1;
1091*4882a593Smuzhiyun 	u32 regval;
1092*4882a593Smuzhiyun 	struct tsi721_ib_win *ib_win;
1093*4882a593Smuzhiyun 	bool direct = (lstart == rstart);
1094*4882a593Smuzhiyun 	u64 ibw_size;
1095*4882a593Smuzhiyun 	dma_addr_t loc_start;
1096*4882a593Smuzhiyun 	u64 ibw_start;
1097*4882a593Smuzhiyun 	struct tsi721_ib_win_mapping *map = NULL;
1098*4882a593Smuzhiyun 	int ret = -EBUSY;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	/* Max IBW size supported by HW is 16GB */
1101*4882a593Smuzhiyun 	if (size > 0x400000000UL)
1102*4882a593Smuzhiyun 		return -EINVAL;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	if (direct) {
1105*4882a593Smuzhiyun 		/* Calculate minimal acceptable window size and base address */
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 		ibw_size = roundup_pow_of_two(size);
1108*4882a593Smuzhiyun 		ibw_start = lstart & ~(ibw_size - 1);
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 		tsi_debug(IBW, &priv->pdev->dev,
1111*4882a593Smuzhiyun 			"Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
1112*4882a593Smuzhiyun 			rstart, &lstart, size, ibw_start);
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 		while ((lstart + size) > (ibw_start + ibw_size)) {
1115*4882a593Smuzhiyun 			ibw_size *= 2;
1116*4882a593Smuzhiyun 			ibw_start = lstart & ~(ibw_size - 1);
1117*4882a593Smuzhiyun 			/* Fail if the window would exceed the 16GB IBW size limit */
1118*4882a593Smuzhiyun 			if (ibw_size > 0x400000000UL)
1119*4882a593Smuzhiyun 				return -EBUSY;
1120*4882a593Smuzhiyun 		}
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 		loc_start = ibw_start;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 		map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC);
1125*4882a593Smuzhiyun 		if (map == NULL)
1126*4882a593Smuzhiyun 			return -ENOMEM;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	} else {
1129*4882a593Smuzhiyun 		tsi_debug(IBW, &priv->pdev->dev,
1130*4882a593Smuzhiyun 			"Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
1131*4882a593Smuzhiyun 			rstart, &lstart, size);
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 		if (!is_power_of_2(size) || size < 0x1000 ||
1134*4882a593Smuzhiyun 		    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
1135*4882a593Smuzhiyun 			return -EINVAL;
1136*4882a593Smuzhiyun 		if (priv->ibwin_cnt == 0)
1137*4882a593Smuzhiyun 			return -EBUSY;
1138*4882a593Smuzhiyun 		ibw_start = rstart;
1139*4882a593Smuzhiyun 		ibw_size = size;
1140*4882a593Smuzhiyun 		loc_start = lstart;
1141*4882a593Smuzhiyun 	}
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 	/*
1144*4882a593Smuzhiyun 	 * Scan for overlap with active regions and, at the same time, note
1145*4882a593Smuzhiyun 	 * the first available IB window.
1146*4882a593Smuzhiyun 	 */
1147*4882a593Smuzhiyun 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1148*4882a593Smuzhiyun 		ib_win = &priv->ib_win[i];
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 		if (!ib_win->active) {
1151*4882a593Smuzhiyun 			if (avail == -1) {
1152*4882a593Smuzhiyun 				avail = i;
1153*4882a593Smuzhiyun 				ret = 0;
1154*4882a593Smuzhiyun 			}
1155*4882a593Smuzhiyun 		} else if (ibw_start < (ib_win->rstart + ib_win->size) &&
1156*4882a593Smuzhiyun 			   (ibw_start + ibw_size) > ib_win->rstart) {
1157*4882a593Smuzhiyun 			/* Return error if address translation involved */
1158*4882a593Smuzhiyun 			if (!direct || ib_win->xlat) {
1159*4882a593Smuzhiyun 				ret = -EFAULT;
1160*4882a593Smuzhiyun 				break;
1161*4882a593Smuzhiyun 			}
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 			/*
1164*4882a593Smuzhiyun 			 * Direct mappings are usually larger than the
1165*4882a593Smuzhiyun 			 * originally requested fragments - check whether this
1166*4882a593Smuzhiyun 			 * new request fits into the existing window.
1167*4882a593Smuzhiyun 			 */
1168*4882a593Smuzhiyun 			if (rstart >= ib_win->rstart &&
1169*4882a593Smuzhiyun 			    (rstart + size) <= (ib_win->rstart +
1170*4882a593Smuzhiyun 							ib_win->size)) {
1171*4882a593Smuzhiyun 				/* The request fits - no further mapping required */
1172*4882a593Smuzhiyun 				map->lstart = lstart;
1173*4882a593Smuzhiyun 				list_add_tail(&map->node, &ib_win->mappings);
1174*4882a593Smuzhiyun 				return 0;
1175*4882a593Smuzhiyun 			}
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 			ret = -EFAULT;
1178*4882a593Smuzhiyun 			break;
1179*4882a593Smuzhiyun 		}
1180*4882a593Smuzhiyun 	}
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	if (ret)
1183*4882a593Smuzhiyun 		goto out;
1184*4882a593Smuzhiyun 	i = avail;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	/* Sanity check: available IB window must be disabled at this point */
1187*4882a593Smuzhiyun 	regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
1188*4882a593Smuzhiyun 	if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) {
1189*4882a593Smuzhiyun 		ret = -EIO;
1190*4882a593Smuzhiyun 		goto out;
1191*4882a593Smuzhiyun 	}
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	ib_win = &priv->ib_win[i];
1194*4882a593Smuzhiyun 	ib_win->active = true;
1195*4882a593Smuzhiyun 	ib_win->rstart = ibw_start;
1196*4882a593Smuzhiyun 	ib_win->lstart = loc_start;
1197*4882a593Smuzhiyun 	ib_win->size = ibw_size;
1198*4882a593Smuzhiyun 	ib_win->xlat = (lstart != rstart);
1199*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ib_win->mappings);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	/*
1202*4882a593Smuzhiyun 	 * A direct IBW mapping may be larger than requested, so multiple
1203*4882a593Smuzhiyun 	 * local memory blocks can be mapped through the same IBW. To handle
1204*4882a593Smuzhiyun 	 * this situation we maintain a list of "clients" for such IBWs.
1205*4882a593Smuzhiyun 	 */
1206*4882a593Smuzhiyun 	if (direct) {
1207*4882a593Smuzhiyun 		map->lstart = lstart;
1208*4882a593Smuzhiyun 		list_add_tail(&map->node, &ib_win->mappings);
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 
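	/*
	 * Program the window registers: the window size first, then the
	 * translated PCIe address (TUA/TLA), and finally the RapidIO base
	 * address (UB/LB) with the window enable bit (WEN) set to activate
	 * the mapping.
	 */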
1211*4882a593Smuzhiyun 	iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8,
1212*4882a593Smuzhiyun 			priv->regs + TSI721_IBWIN_SZ(i));
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i));
1215*4882a593Smuzhiyun 	iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD),
1216*4882a593Smuzhiyun 		  priv->regs + TSI721_IBWIN_TLA(i));
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i));
1219*4882a593Smuzhiyun 	iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
1220*4882a593Smuzhiyun 		priv->regs + TSI721_IBWIN_LB(i));
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	priv->ibwin_cnt--;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	tsi_debug(IBW, &priv->pdev->dev,
1225*4882a593Smuzhiyun 		"Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
1226*4882a593Smuzhiyun 		i, ibw_start, &loc_start, ibw_size);
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	return 0;
1229*4882a593Smuzhiyun out:
1230*4882a593Smuzhiyun 	kfree(map);
1231*4882a593Smuzhiyun 	return ret;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun 
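/*
 * Usage sketch (illustrative only, not part of this driver): clients
 * normally reach tsi721_rio_map_inb_mem() through the generic
 * rio_map_inb_region() wrapper, e.g. to expose a coherent DMA buffer
 * to remote SRIO accesses. Assumes a 64KB buffer and default flags.
 *
 *	static int example_expose_buffer(struct rio_mport *mport)
 *	{
 *		dma_addr_t dma;
 *		void *buf = dma_alloc_coherent(mport->dev.parent, SZ_64K,
 *					       &dma, GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		// Direct mapping: RapidIO base == PCIe bus address
 *		return rio_map_inb_region(mport, dma, dma, SZ_64K, 0);
 *	}
 *
 * The region is released later with rio_unmap_inb_region(mport, dma).
 */
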
1234*4882a593Smuzhiyun /**
1235*4882a593Smuzhiyun  * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
1236*4882a593Smuzhiyun  * @mport: RapidIO master port
1237*4882a593Smuzhiyun  * @lstart: Local memory space start address.
1238*4882a593Smuzhiyun  */
tsi721_rio_unmap_inb_mem(struct rio_mport * mport,dma_addr_t lstart)1239*4882a593Smuzhiyun static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
1240*4882a593Smuzhiyun 				dma_addr_t lstart)
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun 	struct tsi721_device *priv = mport->priv;
1243*4882a593Smuzhiyun 	struct tsi721_ib_win *ib_win;
1244*4882a593Smuzhiyun 	int i;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	tsi_debug(IBW, &priv->pdev->dev,
1247*4882a593Smuzhiyun 		"Unmap IBW mapped to PCIe_%pad", &lstart);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	/* Search for matching active inbound translation window */
1250*4882a593Smuzhiyun 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1251*4882a593Smuzhiyun 		ib_win = &priv->ib_win[i];
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 		/* Address-translating IBWs must be an exact match */
1254*4882a593Smuzhiyun 		if (!ib_win->active ||
1255*4882a593Smuzhiyun 		    (ib_win->xlat && lstart != ib_win->lstart))
1256*4882a593Smuzhiyun 			continue;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 		if (lstart >= ib_win->lstart &&
1259*4882a593Smuzhiyun 		    lstart < (ib_win->lstart + ib_win->size)) {
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 			if (!ib_win->xlat) {
1262*4882a593Smuzhiyun 				struct tsi721_ib_win_mapping *map;
1263*4882a593Smuzhiyun 				int found = 0;
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 				list_for_each_entry(map,
1266*4882a593Smuzhiyun 						    &ib_win->mappings, node) {
1267*4882a593Smuzhiyun 					if (map->lstart == lstart) {
1268*4882a593Smuzhiyun 						list_del(&map->node);
1269*4882a593Smuzhiyun 						kfree(map);
1270*4882a593Smuzhiyun 						found = 1;
1271*4882a593Smuzhiyun 						break;
1272*4882a593Smuzhiyun 					}
1273*4882a593Smuzhiyun 				}
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 				if (!found)
1276*4882a593Smuzhiyun 					continue;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 				if (!list_empty(&ib_win->mappings))
1279*4882a593Smuzhiyun 					break;
1280*4882a593Smuzhiyun 			}
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 			tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i);
1283*4882a593Smuzhiyun 			iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1284*4882a593Smuzhiyun 			ib_win->active = false;
1285*4882a593Smuzhiyun 			priv->ibwin_cnt++;
1286*4882a593Smuzhiyun 			break;
1287*4882a593Smuzhiyun 		}
1288*4882a593Smuzhiyun 	}
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	if (i == TSI721_IBWIN_NUM)
1291*4882a593Smuzhiyun 		tsi_debug(IBW, &priv->pdev->dev,
1292*4882a593Smuzhiyun 			"IB window mapped to %pad not found", &lstart);
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun /**
1296*4882a593Smuzhiyun  * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
1297*4882a593Smuzhiyun  * translation regions.
1298*4882a593Smuzhiyun  * @priv: pointer to tsi721 private data
1299*4882a593Smuzhiyun  *
1300*4882a593Smuzhiyun  * Disables inbound windows.
1301*4882a593Smuzhiyun  */
tsi721_init_sr2pc_mapping(struct tsi721_device * priv)1302*4882a593Smuzhiyun static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	int i;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	/* Disable all SR2PC inbound windows */
1307*4882a593Smuzhiyun 	for (i = 0; i < TSI721_IBWIN_NUM; i++)
1308*4882a593Smuzhiyun 		iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1309*4882a593Smuzhiyun 	priv->ibwin_cnt = TSI721_IBWIN_NUM;
1310*4882a593Smuzhiyun }
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun /*
1313*4882a593Smuzhiyun  * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe)
1314*4882a593Smuzhiyun  * translation regions.
1315*4882a593Smuzhiyun  * @priv: pointer to tsi721 device private data
1316*4882a593Smuzhiyun  */
tsi721_close_sr2pc_mapping(struct tsi721_device * priv)1317*4882a593Smuzhiyun static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun 	struct tsi721_ib_win *ib_win;
1320*4882a593Smuzhiyun 	int i;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	/* Disable all active SR2PC inbound windows */
1323*4882a593Smuzhiyun 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1324*4882a593Smuzhiyun 		ib_win = &priv->ib_win[i];
1325*4882a593Smuzhiyun 		if (ib_win->active) {
1326*4882a593Smuzhiyun 			iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1327*4882a593Smuzhiyun 			ib_win->active = false;
1328*4882a593Smuzhiyun 		}
1329*4882a593Smuzhiyun 	}
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun /**
1333*4882a593Smuzhiyun  * tsi721_port_write_init - Inbound port write interface init
1334*4882a593Smuzhiyun  * @priv: pointer to tsi721 private data
1335*4882a593Smuzhiyun  *
1336*4882a593Smuzhiyun  * Initializes inbound port write handler.
1337*4882a593Smuzhiyun  * Returns %0 on success or %-ENOMEM on failure.
1338*4882a593Smuzhiyun  */
tsi721_port_write_init(struct tsi721_device * priv)1339*4882a593Smuzhiyun static int tsi721_port_write_init(struct tsi721_device *priv)
1340*4882a593Smuzhiyun {
1341*4882a593Smuzhiyun 	priv->pw_discard_count = 0;
1342*4882a593Smuzhiyun 	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
1343*4882a593Smuzhiyun 	spin_lock_init(&priv->pw_fifo_lock);
1344*4882a593Smuzhiyun 	if (kfifo_alloc(&priv->pw_fifo,
1345*4882a593Smuzhiyun 			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1346*4882a593Smuzhiyun 		tsi_err(&priv->pdev->dev, "PW FIFO allocation failed");
1347*4882a593Smuzhiyun 		return -ENOMEM;
1348*4882a593Smuzhiyun 	}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/* Use reliable port-write capture mode */
1351*4882a593Smuzhiyun 	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
1352*4882a593Smuzhiyun 	return 0;
1353*4882a593Smuzhiyun }
1354*4882a593Smuzhiyun 
tsi721_port_write_free(struct tsi721_device * priv)1355*4882a593Smuzhiyun static void tsi721_port_write_free(struct tsi721_device *priv)
1356*4882a593Smuzhiyun {
1357*4882a593Smuzhiyun 	kfifo_free(&priv->pw_fifo);
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun 
tsi721_doorbell_init(struct tsi721_device * priv)1360*4882a593Smuzhiyun static int tsi721_doorbell_init(struct tsi721_device *priv)
1361*4882a593Smuzhiyun {
1362*4882a593Smuzhiyun 	/* Outbound Doorbells do not require any setup.
1363*4882a593Smuzhiyun 	 * Tsi721 uses the dedicated PCI BAR1 to generate doorbells.
1364*4882a593Smuzhiyun 	 * That BAR was mapped during the probe routine.
1365*4882a593Smuzhiyun 	 */
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	/* Initialize Inbound Doorbell processing DPC and queue */
1368*4882a593Smuzhiyun 	priv->db_discard_count = 0;
1369*4882a593Smuzhiyun 	INIT_WORK(&priv->idb_work, tsi721_db_dpc);
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	/* Allocate buffer for inbound doorbells queue */
1372*4882a593Smuzhiyun 	priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
1373*4882a593Smuzhiyun 					    IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1374*4882a593Smuzhiyun 					    &priv->idb_dma, GFP_KERNEL);
1375*4882a593Smuzhiyun 	if (!priv->idb_base)
1376*4882a593Smuzhiyun 		return -ENOMEM;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	tsi_debug(DBELL, &priv->pdev->dev,
1379*4882a593Smuzhiyun 		  "Allocated IDB buffer @ %p (phys = %pad)",
1380*4882a593Smuzhiyun 		  priv->idb_base, &priv->idb_dma);
1381*4882a593Smuzhiyun 
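	/*
	 * Program the inbound doorbell queue: queue size, 64-bit base
	 * address split across BASEU/BASEL, an accept-all mask, and then
	 * bring the queue to its initial state with the read pointer at
	 * zero.
	 */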
1382*4882a593Smuzhiyun 	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
1383*4882a593Smuzhiyun 		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
1384*4882a593Smuzhiyun 	iowrite32(((u64)priv->idb_dma >> 32),
1385*4882a593Smuzhiyun 		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
1386*4882a593Smuzhiyun 	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
1387*4882a593Smuzhiyun 		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
1388*4882a593Smuzhiyun 	/* Enable accepting all inbound doorbells */
1389*4882a593Smuzhiyun 	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	return 0;
1396*4882a593Smuzhiyun }
1397*4882a593Smuzhiyun 
tsi721_doorbell_free(struct tsi721_device * priv)1398*4882a593Smuzhiyun static void tsi721_doorbell_free(struct tsi721_device *priv)
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun 	if (priv->idb_base == NULL)
1401*4882a593Smuzhiyun 		return;
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	/* Free buffer allocated for inbound doorbell queue */
1404*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1405*4882a593Smuzhiyun 			  priv->idb_base, priv->idb_dma);
1406*4882a593Smuzhiyun 	priv->idb_base = NULL;
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun /**
1410*4882a593Smuzhiyun  * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
1411*4882a593Smuzhiyun  * @priv: pointer to tsi721 private data
1412*4882a593Smuzhiyun  *
1413*4882a593Smuzhiyun  * Initialize BDMA channel allocated for RapidIO maintenance read/write
1414*4882a593Smuzhiyun  * request generation.
1415*4882a593Smuzhiyun  * Returns %0 on success or %-ENOMEM on failure.
1416*4882a593Smuzhiyun  */
tsi721_bdma_maint_init(struct tsi721_device * priv)1417*4882a593Smuzhiyun static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	struct tsi721_dma_desc *bd_ptr;
1420*4882a593Smuzhiyun 	u64		*sts_ptr;
1421*4882a593Smuzhiyun 	dma_addr_t	bd_phys, sts_phys;
1422*4882a593Smuzhiyun 	int		sts_size;
1423*4882a593Smuzhiyun 	int		bd_num = 2;
1424*4882a593Smuzhiyun 	void __iomem	*regs;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	tsi_debug(MAINT, &priv->pdev->dev,
1427*4882a593Smuzhiyun 		  "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	/*
1430*4882a593Smuzhiyun 	 * Initialize DMA channel for maintenance requests
1431*4882a593Smuzhiyun 	 */
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	priv->mdma.ch_id = TSI721_DMACH_MAINT;
1434*4882a593Smuzhiyun 	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	/* Allocate space for DMA descriptors */
1437*4882a593Smuzhiyun 	bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
1438*4882a593Smuzhiyun 				    bd_num * sizeof(struct tsi721_dma_desc),
1439*4882a593Smuzhiyun 				    &bd_phys, GFP_KERNEL);
1440*4882a593Smuzhiyun 	if (!bd_ptr)
1441*4882a593Smuzhiyun 		return -ENOMEM;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	priv->mdma.bd_num = bd_num;
1444*4882a593Smuzhiyun 	priv->mdma.bd_phys = bd_phys;
1445*4882a593Smuzhiyun 	priv->mdma.bd_base = bd_ptr;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)",
1448*4882a593Smuzhiyun 		  bd_ptr, &bd_phys);
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	/* Allocate space for descriptor status FIFO */
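	/*
	 * The status FIFO must hold at least TSI721_DMA_MINSTSSZ entries
	 * and its size must be a power of two.
	 */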
1451*4882a593Smuzhiyun 	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
1452*4882a593Smuzhiyun 					bd_num : TSI721_DMA_MINSTSSZ;
1453*4882a593Smuzhiyun 	sts_size = roundup_pow_of_two(sts_size);
1454*4882a593Smuzhiyun 	sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
1455*4882a593Smuzhiyun 				     sts_size * sizeof(struct tsi721_dma_sts),
1456*4882a593Smuzhiyun 				     &sts_phys, GFP_KERNEL);
1457*4882a593Smuzhiyun 	if (!sts_ptr) {
1458*4882a593Smuzhiyun 		/* Free space allocated for DMA descriptors */
1459*4882a593Smuzhiyun 		dma_free_coherent(&priv->pdev->dev,
1460*4882a593Smuzhiyun 				  bd_num * sizeof(struct tsi721_dma_desc),
1461*4882a593Smuzhiyun 				  bd_ptr, bd_phys);
1462*4882a593Smuzhiyun 		priv->mdma.bd_base = NULL;
1463*4882a593Smuzhiyun 		return -ENOMEM;
1464*4882a593Smuzhiyun 	}
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	priv->mdma.sts_phys = sts_phys;
1467*4882a593Smuzhiyun 	priv->mdma.sts_base = sts_ptr;
1468*4882a593Smuzhiyun 	priv->mdma.sts_size = sts_size;
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	tsi_debug(MAINT, &priv->pdev->dev,
1471*4882a593Smuzhiyun 		"desc status FIFO @ %p (phys = %pad) size=0x%x",
1472*4882a593Smuzhiyun 		sts_ptr, &sts_phys, sts_size);
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	/* Initialize DMA descriptors ring */
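	/*
	 * The last descriptor is a DTYPE3 link descriptor pointing back to
	 * the start of the ring, which closes the ring into a loop.
	 */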
1475*4882a593Smuzhiyun 	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
1476*4882a593Smuzhiyun 	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
1477*4882a593Smuzhiyun 						 TSI721_DMAC_DPTRL_MASK);
1478*4882a593Smuzhiyun 	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/* Setup DMA descriptor pointers */
1481*4882a593Smuzhiyun 	iowrite32(((u64)bd_phys >> 32),	regs + TSI721_DMAC_DPTRH);
1482*4882a593Smuzhiyun 	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
1483*4882a593Smuzhiyun 		regs + TSI721_DMAC_DPTRL);
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	/* Setup descriptor status FIFO */
1486*4882a593Smuzhiyun 	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
1487*4882a593Smuzhiyun 	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
1488*4882a593Smuzhiyun 		regs + TSI721_DMAC_DSBL);
1489*4882a593Smuzhiyun 	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
1490*4882a593Smuzhiyun 		regs + TSI721_DMAC_DSSZ);
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	/* Clear interrupt bits */
1493*4882a593Smuzhiyun 	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	ioread32(regs + TSI721_DMAC_INT);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	/* Toggle DMA channel initialization */
1498*4882a593Smuzhiyun 	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
1499*4882a593Smuzhiyun 	ioread32(regs + TSI721_DMAC_CTL);
1500*4882a593Smuzhiyun 	udelay(10);
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	return 0;
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun 
tsi721_bdma_maint_free(struct tsi721_device * priv)1505*4882a593Smuzhiyun static int tsi721_bdma_maint_free(struct tsi721_device *priv)
1506*4882a593Smuzhiyun {
1507*4882a593Smuzhiyun 	u32 ch_stat;
1508*4882a593Smuzhiyun 	struct tsi721_bdma_maint *mdma = &priv->mdma;
1509*4882a593Smuzhiyun 	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	if (mdma->bd_base == NULL)
1512*4882a593Smuzhiyun 		return 0;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	/* Check if the DMA channel is still running */
1515*4882a593Smuzhiyun 	ch_stat = ioread32(regs + TSI721_DMAC_STS);
1516*4882a593Smuzhiyun 	if (ch_stat & TSI721_DMAC_STS_RUN)
1517*4882a593Smuzhiyun 		return -EFAULT;
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	/* Put DMA channel into init state */
1520*4882a593Smuzhiyun 	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	/* Free space allocated for DMA descriptors */
1523*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev,
1524*4882a593Smuzhiyun 		mdma->bd_num * sizeof(struct tsi721_dma_desc),
1525*4882a593Smuzhiyun 		mdma->bd_base, mdma->bd_phys);
1526*4882a593Smuzhiyun 	mdma->bd_base = NULL;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	/* Free space allocated for status FIFO */
1529*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev,
1530*4882a593Smuzhiyun 		mdma->sts_size * sizeof(struct tsi721_dma_sts),
1531*4882a593Smuzhiyun 		mdma->sts_base, mdma->sts_phys);
1532*4882a593Smuzhiyun 	mdma->sts_base = NULL;
1533*4882a593Smuzhiyun 	return 0;
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun /* Enable Inbound Messaging Interrupts */
1537*4882a593Smuzhiyun static void
tsi721_imsg_interrupt_enable(struct tsi721_device * priv,int ch,u32 inte_mask)1538*4882a593Smuzhiyun tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
1539*4882a593Smuzhiyun 				  u32 inte_mask)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun 	u32 rval;
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	if (!inte_mask)
1544*4882a593Smuzhiyun 		return;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	/* Clear pending Inbound Messaging interrupts */
1547*4882a593Smuzhiyun 	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	/* Enable Inbound Messaging interrupts */
1550*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1551*4882a593Smuzhiyun 	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	if (priv->flags & TSI721_USING_MSIX)
1554*4882a593Smuzhiyun 		return; /* Finished if we are in MSI-X mode */
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	/*
1557*4882a593Smuzhiyun 	 * For MSI and INTA signalling we must also enable the next interrupt levels
1558*4882a593Smuzhiyun 	 */
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	/* Enable Device Channel Interrupt */
1561*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1562*4882a593Smuzhiyun 	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
1563*4882a593Smuzhiyun 		  priv->regs + TSI721_DEV_CHAN_INTE);
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun /* Disable Inbound Messaging Interrupts */
1567*4882a593Smuzhiyun static void
tsi721_imsg_interrupt_disable(struct tsi721_device * priv,int ch,u32 inte_mask)1568*4882a593Smuzhiyun tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
1569*4882a593Smuzhiyun 				   u32 inte_mask)
1570*4882a593Smuzhiyun {
1571*4882a593Smuzhiyun 	u32 rval;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	if (!inte_mask)
1574*4882a593Smuzhiyun 		return;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	/* Clear pending Inbound Messaging interrupts */
1577*4882a593Smuzhiyun 	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/* Disable Inbound Messaging interrupts */
1580*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1581*4882a593Smuzhiyun 	rval &= ~inte_mask;
1582*4882a593Smuzhiyun 	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	if (priv->flags & TSI721_USING_MSIX)
1585*4882a593Smuzhiyun 		return; /* Finished if we are in MSI-X mode */
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	/*
1588*4882a593Smuzhiyun 	 * For MSI and INTA signalling we must also disable the next interrupt levels
1589*4882a593Smuzhiyun 	 */
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	/* Disable Device Channel Interrupt */
1592*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1593*4882a593Smuzhiyun 	rval &= ~TSI721_INT_IMSG_CHAN(ch);
1594*4882a593Smuzhiyun 	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun /* Enable Outbound Messaging interrupts */
1598*4882a593Smuzhiyun static void
tsi721_omsg_interrupt_enable(struct tsi721_device * priv,int ch,u32 inte_mask)1599*4882a593Smuzhiyun tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
1600*4882a593Smuzhiyun 				  u32 inte_mask)
1601*4882a593Smuzhiyun {
1602*4882a593Smuzhiyun 	u32 rval;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	if (!inte_mask)
1605*4882a593Smuzhiyun 		return;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	/* Clear pending Outbound Messaging interrupts */
1608*4882a593Smuzhiyun 	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	/* Enable Outbound Messaging channel interrupts */
1611*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1612*4882a593Smuzhiyun 	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	if (priv->flags & TSI721_USING_MSIX)
1615*4882a593Smuzhiyun 		return; /* Finished if we are in MSI-X mode */
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	/*
1618*4882a593Smuzhiyun 	 * For MSI and INTA signalling we must also enable the next interrupt levels
1619*4882a593Smuzhiyun 	 */
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	/* Enable Device Channel Interrupt */
1622*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1623*4882a593Smuzhiyun 	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
1624*4882a593Smuzhiyun 		  priv->regs + TSI721_DEV_CHAN_INTE);
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun /* Disable Outbound Messaging interrupts */
1628*4882a593Smuzhiyun static void
tsi721_omsg_interrupt_disable(struct tsi721_device * priv,int ch,u32 inte_mask)1629*4882a593Smuzhiyun tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
1630*4882a593Smuzhiyun 				   u32 inte_mask)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	u32 rval;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	if (!inte_mask)
1635*4882a593Smuzhiyun 		return;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	/* Clear pending Outbound Messaging interrupts */
1638*4882a593Smuzhiyun 	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	/* Disable Outbound Messaging interrupts */
1641*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1642*4882a593Smuzhiyun 	rval &= ~inte_mask;
1643*4882a593Smuzhiyun 	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	if (priv->flags & TSI721_USING_MSIX)
1646*4882a593Smuzhiyun 		return; /* Finished if we are in MSI-X mode */
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	/*
1649*4882a593Smuzhiyun 	 * For MSI and INTA signalling we must also disable the next interrupt levels
1650*4882a593Smuzhiyun 	 */
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	/* Disable Device Channel Interrupt */
1653*4882a593Smuzhiyun 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1654*4882a593Smuzhiyun 	rval &= ~TSI721_INT_OMSG_CHAN(ch);
1655*4882a593Smuzhiyun 	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun /**
1659*4882a593Smuzhiyun  * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
1660*4882a593Smuzhiyun  * @mport: Master port with outbound message queue
1661*4882a593Smuzhiyun  * @rdev: Target of outbound message
1662*4882a593Smuzhiyun  * @mbox: Outbound mailbox
1663*4882a593Smuzhiyun  * @buffer: Message to add to outbound queue
1664*4882a593Smuzhiyun  * @len: Length of message
1665*4882a593Smuzhiyun  */
1666*4882a593Smuzhiyun static int
tsi721_add_outb_message(struct rio_mport * mport,struct rio_dev * rdev,int mbox,void * buffer,size_t len)1667*4882a593Smuzhiyun tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
1668*4882a593Smuzhiyun 			void *buffer, size_t len)
1669*4882a593Smuzhiyun {
1670*4882a593Smuzhiyun 	struct tsi721_device *priv = mport->priv;
1671*4882a593Smuzhiyun 	struct tsi721_omsg_desc *desc;
1672*4882a593Smuzhiyun 	u32 tx_slot;
1673*4882a593Smuzhiyun 	unsigned long flags;
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	if (!priv->omsg_init[mbox] ||
1676*4882a593Smuzhiyun 	    len > TSI721_MSG_MAX_SIZE || len < 8)
1677*4882a593Smuzhiyun 		return -EINVAL;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags);
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	tx_slot = priv->omsg_ring[mbox].tx_slot;
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	/* Copy message into transfer buffer */
1684*4882a593Smuzhiyun 	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
1685*4882a593Smuzhiyun 
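	/*
	 * Round the length up to the next 8-byte boundary; the descriptor
	 * encodes the size in bits 11:3 (mask 0xff8) below.
	 */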
1686*4882a593Smuzhiyun 	if (len & 0x7)
1687*4882a593Smuzhiyun 		len += 8;
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	/* Build descriptor associated with buffer */
1690*4882a593Smuzhiyun 	desc = priv->omsg_ring[mbox].omd_base;
1691*4882a593Smuzhiyun 	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
1692*4882a593Smuzhiyun #ifdef TSI721_OMSG_DESC_INT
1693*4882a593Smuzhiyun 	/* Request IOF_DONE interrupt generation for every 4th frame in queue */
1694*4882a593Smuzhiyun 	if (tx_slot % 4 == 0)
1695*4882a593Smuzhiyun 		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
1696*4882a593Smuzhiyun #endif
1697*4882a593Smuzhiyun 	desc[tx_slot].msg_info =
1698*4882a593Smuzhiyun 		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
1699*4882a593Smuzhiyun 			    (0xe << 12) | (len & 0xff8));
1700*4882a593Smuzhiyun 	desc[tx_slot].bufptr_lo =
1701*4882a593Smuzhiyun 		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
1702*4882a593Smuzhiyun 			    0xffffffff);
1703*4882a593Smuzhiyun 	desc[tx_slot].bufptr_hi =
1704*4882a593Smuzhiyun 		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	priv->omsg_ring[mbox].wr_count++;
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	/* Go to next descriptor */
1709*4882a593Smuzhiyun 	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
1710*4882a593Smuzhiyun 		priv->omsg_ring[mbox].tx_slot = 0;
1711*4882a593Smuzhiyun 		/* Step over the link descriptor at the end of the ring */
1712*4882a593Smuzhiyun 		priv->omsg_ring[mbox].wr_count++;
1713*4882a593Smuzhiyun 	}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	mb();
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	/* Set new write count value */
1718*4882a593Smuzhiyun 	iowrite32(priv->omsg_ring[mbox].wr_count,
1719*4882a593Smuzhiyun 		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1720*4882a593Smuzhiyun 	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags);
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	return 0;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun 
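/*
 * Usage sketch (illustrative only): clients normally reach this entry
 * point through the generic RapidIO messaging API, e.g.:
 *
 *	rio_request_outb_mbox(mport, dev_id, 0, 64, outb_done_cb);
 *	rio_add_outb_message(mport, rdev, 0, buf, len);
 *
 * where outb_done_cb() is a client-supplied completion callback that
 * tsi721_omsg_handler() invokes with the next slot to be sent once
 * descriptors complete.
 */
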
1727*4882a593Smuzhiyun /**
1728*4882a593Smuzhiyun  * tsi721_omsg_handler - Outbound Message Interrupt Handler
1729*4882a593Smuzhiyun  * @priv: pointer to tsi721 private data
1730*4882a593Smuzhiyun  * @ch:   number of OB MSG channel to service
1731*4882a593Smuzhiyun  *
1732*4882a593Smuzhiyun  * Services channel interrupts from outbound messaging engine.
1733*4882a593Smuzhiyun  */
tsi721_omsg_handler(struct tsi721_device * priv,int ch)1734*4882a593Smuzhiyun static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun 	u32 omsg_int;
1737*4882a593Smuzhiyun 	struct rio_mport *mport = &priv->mport;
1738*4882a593Smuzhiyun 	void *dev_id = NULL;
1739*4882a593Smuzhiyun 	u32 tx_slot = 0xffffffff;
1740*4882a593Smuzhiyun 	int do_callback = 0;
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	spin_lock(&priv->omsg_ring[ch].lock);
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
1747*4882a593Smuzhiyun 		tsi_info(&priv->pdev->dev,
1748*4882a593Smuzhiyun 			"OB MBOX%d: Status FIFO is full", ch);
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
1751*4882a593Smuzhiyun 		u32 srd_ptr;
1752*4882a593Smuzhiyun 		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
1753*4882a593Smuzhiyun 		int i, j;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 		/*
1756*4882a593Smuzhiyun 		 * Find last successfully processed descriptor
1757*4882a593Smuzhiyun 		 */
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 		/* Check and clear descriptor status FIFO entries */
1760*4882a593Smuzhiyun 		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
1761*4882a593Smuzhiyun 		sts_ptr = priv->omsg_ring[ch].sts_base;
1762*4882a593Smuzhiyun 		j = srd_ptr * 8;
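		/*
		 * The status FIFO holds 64-bit descriptor pointers written
		 * back by hardware. Scan it in groups of up to eight
		 * entries, zeroing each consumed slot and remembering the
		 * last and previous non-zero pointers seen.
		 */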
1763*4882a593Smuzhiyun 		while (sts_ptr[j]) {
1764*4882a593Smuzhiyun 			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
1765*4882a593Smuzhiyun 				prev_ptr = last_ptr;
1766*4882a593Smuzhiyun 				last_ptr = le64_to_cpu(sts_ptr[j]);
1767*4882a593Smuzhiyun 				sts_ptr[j] = 0;
1768*4882a593Smuzhiyun 			}
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 			++srd_ptr;
1771*4882a593Smuzhiyun 			srd_ptr %= priv->omsg_ring[ch].sts_size;
1772*4882a593Smuzhiyun 			j = srd_ptr * 8;
1773*4882a593Smuzhiyun 		}
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 		if (last_ptr == 0)
1776*4882a593Smuzhiyun 			goto no_sts_update;
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
1779*4882a593Smuzhiyun 		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 		if (!mport->outb_msg[ch].mcback)
1782*4882a593Smuzhiyun 			goto no_sts_update;
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 		/* Inform upper layer about transfer completion */
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
1787*4882a593Smuzhiyun 						sizeof(struct tsi721_omsg_desc);
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 		/*
1790*4882a593Smuzhiyun 		 * Check if this is a Link Descriptor (LD).
1791*4882a593Smuzhiyun 		 * If yes, ignore LD and use descriptor processed
1792*4882a593Smuzhiyun 		 * before LD.
1793*4882a593Smuzhiyun 		 */
1794*4882a593Smuzhiyun 		if (tx_slot == priv->omsg_ring[ch].size) {
1795*4882a593Smuzhiyun 			if (prev_ptr)
1796*4882a593Smuzhiyun 				tx_slot = (prev_ptr -
1797*4882a593Smuzhiyun 					(u64)priv->omsg_ring[ch].omd_phys)/
1798*4882a593Smuzhiyun 						sizeof(struct tsi721_omsg_desc);
1799*4882a593Smuzhiyun 			else
1800*4882a593Smuzhiyun 				goto no_sts_update;
1801*4882a593Smuzhiyun 		}
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 		if (tx_slot >= priv->omsg_ring[ch].size)
1804*4882a593Smuzhiyun 			tsi_debug(OMSG, &priv->pdev->dev,
1805*4882a593Smuzhiyun 				  "OB_MSG tx_slot=%x >= size=%x",
1806*4882a593Smuzhiyun 				  tx_slot, priv->omsg_ring[ch].size);
1807*4882a593Smuzhiyun 		WARN_ON(tx_slot >= priv->omsg_ring[ch].size);
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 		/* Move slot index to the next message to be sent */
1810*4882a593Smuzhiyun 		++tx_slot;
1811*4882a593Smuzhiyun 		if (tx_slot == priv->omsg_ring[ch].size)
1812*4882a593Smuzhiyun 			tx_slot = 0;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 		dev_id = priv->omsg_ring[ch].dev_id;
1815*4882a593Smuzhiyun 		do_callback = 1;
1816*4882a593Smuzhiyun 	}
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun no_sts_update:
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
1821*4882a593Smuzhiyun 		/*
1822*4882a593Smuzhiyun 		 * Outbound message operation aborted due to error,
1823*4882a593Smuzhiyun 		 * reinitialize OB MSG channel
1824*4882a593Smuzhiyun 		 */
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 		tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x",
1827*4882a593Smuzhiyun 			  ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 		iowrite32(TSI721_OBDMAC_INT_ERROR,
1830*4882a593Smuzhiyun 				priv->regs + TSI721_OBDMAC_INT(ch));
1831*4882a593Smuzhiyun 		iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
1832*4882a593Smuzhiyun 				priv->regs + TSI721_OBDMAC_CTL(ch));
1833*4882a593Smuzhiyun 		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 		/* Inform upper level to clear all pending tx slots */
1836*4882a593Smuzhiyun 		dev_id = priv->omsg_ring[ch].dev_id;
1837*4882a593Smuzhiyun 		tx_slot = priv->omsg_ring[ch].tx_slot;
1838*4882a593Smuzhiyun 		do_callback = 1;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 		/* Synch tx_slot tracking */
1841*4882a593Smuzhiyun 		iowrite32(priv->omsg_ring[ch].tx_slot,
1842*4882a593Smuzhiyun 			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1843*4882a593Smuzhiyun 		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1844*4882a593Smuzhiyun 		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
1845*4882a593Smuzhiyun 		priv->omsg_ring[ch].sts_rdptr = 0;
1846*4882a593Smuzhiyun 	}
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 	/* Clear channel interrupts */
1849*4882a593Smuzhiyun 	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	if (!(priv->flags & TSI721_USING_MSIX)) {
1852*4882a593Smuzhiyun 		u32 ch_inte;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 		/* Re-enable channel interrupts */
1855*4882a593Smuzhiyun 		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1856*4882a593Smuzhiyun 		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
1857*4882a593Smuzhiyun 		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1858*4882a593Smuzhiyun 	}
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	spin_unlock(&priv->omsg_ring[ch].lock);
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	if (mport->outb_msg[ch].mcback && do_callback)
1863*4882a593Smuzhiyun 		mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot);
1864*4882a593Smuzhiyun }
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun /**
1867*4882a593Smuzhiyun  * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
1868*4882a593Smuzhiyun  * @mport: Master port implementing Outbound Messaging Engine
1869*4882a593Smuzhiyun  * @dev_id: Device specific pointer to pass on event
1870*4882a593Smuzhiyun  * @mbox: Mailbox to open
1871*4882a593Smuzhiyun  * @entries: Number of entries in the outbound mailbox ring
1872*4882a593Smuzhiyun  */
tsi721_open_outb_mbox(struct rio_mport * mport,void * dev_id,int mbox,int entries)1873*4882a593Smuzhiyun static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1874*4882a593Smuzhiyun 				 int mbox, int entries)
1875*4882a593Smuzhiyun {
1876*4882a593Smuzhiyun 	struct tsi721_device *priv = mport->priv;
1877*4882a593Smuzhiyun 	struct tsi721_omsg_desc *bd_ptr;
1878*4882a593Smuzhiyun 	int i, rc = 0;
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
1881*4882a593Smuzhiyun 	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
1882*4882a593Smuzhiyun 	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1883*4882a593Smuzhiyun 		rc = -EINVAL;
1884*4882a593Smuzhiyun 		goto out;
1885*4882a593Smuzhiyun 	}
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	if ((mbox_sel & (1 << mbox)) == 0) {
1888*4882a593Smuzhiyun 		rc = -ENODEV;
1889*4882a593Smuzhiyun 		goto out;
1890*4882a593Smuzhiyun 	}
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun 	priv->omsg_ring[mbox].dev_id = dev_id;
1893*4882a593Smuzhiyun 	priv->omsg_ring[mbox].size = entries;
1894*4882a593Smuzhiyun 	priv->omsg_ring[mbox].sts_rdptr = 0;
1895*4882a593Smuzhiyun 	spin_lock_init(&priv->omsg_ring[mbox].lock);
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 	/* Outbound message buffer allocation based on
1898*4882a593Smuzhiyun 	   the maximum number of descriptor entries */
1899*4882a593Smuzhiyun 	for (i = 0; i < entries; i++) {
1900*4882a593Smuzhiyun 		priv->omsg_ring[mbox].omq_base[i] =
1901*4882a593Smuzhiyun 			dma_alloc_coherent(
1902*4882a593Smuzhiyun 				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
1903*4882a593Smuzhiyun 				&priv->omsg_ring[mbox].omq_phys[i],
1904*4882a593Smuzhiyun 				GFP_KERNEL);
1905*4882a593Smuzhiyun 		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
1906*4882a593Smuzhiyun 			tsi_debug(OMSG, &priv->pdev->dev,
1907*4882a593Smuzhiyun 				  "ENOMEM for OB_MSG_%d data buffer", mbox);
1908*4882a593Smuzhiyun 			rc = -ENOMEM;
1909*4882a593Smuzhiyun 			goto out_buf;
1910*4882a593Smuzhiyun 		}
1911*4882a593Smuzhiyun 	}
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	/* Outbound message descriptor allocation */
1914*4882a593Smuzhiyun 	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
1915*4882a593Smuzhiyun 				&priv->pdev->dev,
1916*4882a593Smuzhiyun 				(entries + 1) * sizeof(struct tsi721_omsg_desc),
1917*4882a593Smuzhiyun 				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
1918*4882a593Smuzhiyun 	if (priv->omsg_ring[mbox].omd_base == NULL) {
1919*4882a593Smuzhiyun 		tsi_debug(OMSG, &priv->pdev->dev,
1920*4882a593Smuzhiyun 			"ENOMEM for OB_MSG_%d descriptor memory", mbox);
1921*4882a593Smuzhiyun 		rc = -ENOMEM;
1922*4882a593Smuzhiyun 		goto out_buf;
1923*4882a593Smuzhiyun 	}
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	priv->omsg_ring[mbox].tx_slot = 0;
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun 	/* Outbound message descriptor status FIFO allocation */
1928*4882a593Smuzhiyun 	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1929*4882a593Smuzhiyun 	priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
1930*4882a593Smuzhiyun 							    priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1931*4882a593Smuzhiyun 							    &priv->omsg_ring[mbox].sts_phys,
1932*4882a593Smuzhiyun 							    GFP_KERNEL);
1933*4882a593Smuzhiyun 	if (priv->omsg_ring[mbox].sts_base == NULL) {
1934*4882a593Smuzhiyun 		tsi_debug(OMSG, &priv->pdev->dev,
1935*4882a593Smuzhiyun 			"ENOMEM for OB_MSG_%d status FIFO", mbox);
1936*4882a593Smuzhiyun 		rc = -ENOMEM;
1937*4882a593Smuzhiyun 		goto out_desc;
1938*4882a593Smuzhiyun 	}
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 	/*
1941*4882a593Smuzhiyun 	 * Configure Outbound Messaging Engine
1942*4882a593Smuzhiyun 	 */
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun 	/* Setup Outbound Message descriptor pointer */
1945*4882a593Smuzhiyun 	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
1946*4882a593Smuzhiyun 			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
1947*4882a593Smuzhiyun 	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
1948*4882a593Smuzhiyun 					TSI721_OBDMAC_DPTRL_MASK),
1949*4882a593Smuzhiyun 			priv->regs + TSI721_OBDMAC_DPTRL(mbox));
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	/* Setup Outbound Message descriptor status FIFO */
1952*4882a593Smuzhiyun 	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
1953*4882a593Smuzhiyun 			priv->regs + TSI721_OBDMAC_DSBH(mbox));
1954*4882a593Smuzhiyun 	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
1955*4882a593Smuzhiyun 					TSI721_OBDMAC_DSBL_MASK),
1956*4882a593Smuzhiyun 			priv->regs + TSI721_OBDMAC_DSBL(mbox));
1957*4882a593Smuzhiyun 	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
1958*4882a593Smuzhiyun 		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	/* Enable interrupts */
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSI
1963*4882a593Smuzhiyun 	if (priv->flags & TSI721_USING_MSIX) {
1964*4882a593Smuzhiyun 		int idx = TSI721_VECT_OMB0_DONE + mbox;
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 		/* Request interrupt service if we are in MSI-X mode */
1967*4882a593Smuzhiyun 		rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
1968*4882a593Smuzhiyun 				 priv->msix[idx].irq_name, (void *)priv);
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 		if (rc) {
1971*4882a593Smuzhiyun 			tsi_debug(OMSG, &priv->pdev->dev,
1972*4882a593Smuzhiyun 				"Unable to get MSI-X IRQ for OB MBOX%d-DONE",
1973*4882a593Smuzhiyun 				mbox);
1974*4882a593Smuzhiyun 			goto out_stat;
1975*4882a593Smuzhiyun 		}
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun 		idx = TSI721_VECT_OMB0_INT + mbox;
1978*4882a593Smuzhiyun 		rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
1979*4882a593Smuzhiyun 				 priv->msix[idx].irq_name, (void *)priv);
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 		if (rc)	{
1982*4882a593Smuzhiyun 			tsi_debug(OMSG, &priv->pdev->dev,
1983*4882a593Smuzhiyun 				"Unable to get MSI-X IRQ for OB MBOX%d-INT", mbox);
1984*4882a593Smuzhiyun 			idx = TSI721_VECT_OMB0_DONE + mbox;
1985*4882a593Smuzhiyun 			free_irq(priv->msix[idx].vector, (void *)priv);
1986*4882a593Smuzhiyun 			goto out_stat;
1987*4882a593Smuzhiyun 		}
1988*4882a593Smuzhiyun 	}
1989*4882a593Smuzhiyun #endif /* CONFIG_PCI_MSI */
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	/* Initialize Outbound Message descriptors ring */
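	/*
	 * The extra descriptor at index 'entries' is a DTYPE5 link
	 * descriptor pointing back to the ring start, closing the ring.
	 */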
1994*4882a593Smuzhiyun 	bd_ptr = priv->omsg_ring[mbox].omd_base;
1995*4882a593Smuzhiyun 	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
1996*4882a593Smuzhiyun 	bd_ptr[entries].msg_info = 0;
1997*4882a593Smuzhiyun 	bd_ptr[entries].next_lo =
1998*4882a593Smuzhiyun 		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
1999*4882a593Smuzhiyun 		TSI721_OBDMAC_DPTRL_MASK);
2000*4882a593Smuzhiyun 	bd_ptr[entries].next_hi =
2001*4882a593Smuzhiyun 		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
2002*4882a593Smuzhiyun 	priv->omsg_ring[mbox].wr_count = 0;
2003*4882a593Smuzhiyun 	mb();
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 	/* Initialize Outbound Message engine */
2006*4882a593Smuzhiyun 	iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
2007*4882a593Smuzhiyun 		  priv->regs + TSI721_OBDMAC_CTL(mbox));
2008*4882a593Smuzhiyun 	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
2009*4882a593Smuzhiyun 	udelay(10);
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	priv->omsg_init[mbox] = 1;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	return 0;
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSI
2016*4882a593Smuzhiyun out_stat:
2017*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev,
2018*4882a593Smuzhiyun 		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
2019*4882a593Smuzhiyun 		priv->omsg_ring[mbox].sts_base,
2020*4882a593Smuzhiyun 		priv->omsg_ring[mbox].sts_phys);
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 	priv->omsg_ring[mbox].sts_base = NULL;
2023*4882a593Smuzhiyun #endif /* CONFIG_PCI_MSI */
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun out_desc:
2026*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev,
2027*4882a593Smuzhiyun 		(entries + 1) * sizeof(struct tsi721_omsg_desc),
2028*4882a593Smuzhiyun 		priv->omsg_ring[mbox].omd_base,
2029*4882a593Smuzhiyun 		priv->omsg_ring[mbox].omd_phys);
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 	priv->omsg_ring[mbox].omd_base = NULL;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun out_buf:
2034*4882a593Smuzhiyun 	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
2035*4882a593Smuzhiyun 		if (priv->omsg_ring[mbox].omq_base[i]) {
2036*4882a593Smuzhiyun 			dma_free_coherent(&priv->pdev->dev,
2037*4882a593Smuzhiyun 				TSI721_MSG_BUFFER_SIZE,
2038*4882a593Smuzhiyun 				priv->omsg_ring[mbox].omq_base[i],
2039*4882a593Smuzhiyun 				priv->omsg_ring[mbox].omq_phys[i]);
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 			priv->omsg_ring[mbox].omq_base[i] = NULL;
2042*4882a593Smuzhiyun 		}
2043*4882a593Smuzhiyun 	}
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun out:
2046*4882a593Smuzhiyun 	return rc;
2047*4882a593Smuzhiyun }
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun /**
2050*4882a593Smuzhiyun  * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
2051*4882a593Smuzhiyun  * @mport: Master port implementing the outbound message unit
2052*4882a593Smuzhiyun  * @mbox: Mailbox to close
2053*4882a593Smuzhiyun  */
tsi721_close_outb_mbox(struct rio_mport * mport,int mbox)2054*4882a593Smuzhiyun static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun 	struct tsi721_device *priv = mport->priv;
2057*4882a593Smuzhiyun 	u32 i;
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	if (!priv->omsg_init[mbox])
2060*4882a593Smuzhiyun 		return;
2061*4882a593Smuzhiyun 	priv->omsg_init[mbox] = 0;
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 	/* Disable Interrupts */
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSI
2068*4882a593Smuzhiyun 	if (priv->flags & TSI721_USING_MSIX) {
2069*4882a593Smuzhiyun 		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
2070*4882a593Smuzhiyun 			 (void *)priv);
2071*4882a593Smuzhiyun 		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
2072*4882a593Smuzhiyun 			 (void *)priv);
2073*4882a593Smuzhiyun 	}
2074*4882a593Smuzhiyun #endif /* CONFIG_PCI_MSI */
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	/* Free OMSG Descriptor Status FIFO */
2077*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev,
2078*4882a593Smuzhiyun 		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
2079*4882a593Smuzhiyun 		priv->omsg_ring[mbox].sts_base,
2080*4882a593Smuzhiyun 		priv->omsg_ring[mbox].sts_phys);
2081*4882a593Smuzhiyun 
2082*4882a593Smuzhiyun 	priv->omsg_ring[mbox].sts_base = NULL;
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	/* Free OMSG descriptors */
2085*4882a593Smuzhiyun 	dma_free_coherent(&priv->pdev->dev,
2086*4882a593Smuzhiyun 		(priv->omsg_ring[mbox].size + 1) *
2087*4882a593Smuzhiyun 			sizeof(struct tsi721_omsg_desc),
2088*4882a593Smuzhiyun 		priv->omsg_ring[mbox].omd_base,
2089*4882a593Smuzhiyun 		priv->omsg_ring[mbox].omd_phys);
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	priv->omsg_ring[mbox].omd_base = NULL;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	/* Free message buffers */
2094*4882a593Smuzhiyun 	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
2095*4882a593Smuzhiyun 		if (priv->omsg_ring[mbox].omq_base[i]) {
2096*4882a593Smuzhiyun 			dma_free_coherent(&priv->pdev->dev,
2097*4882a593Smuzhiyun 				TSI721_MSG_BUFFER_SIZE,
2098*4882a593Smuzhiyun 				priv->omsg_ring[mbox].omq_base[i],
2099*4882a593Smuzhiyun 				priv->omsg_ring[mbox].omq_phys[i]);
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 			priv->omsg_ring[mbox].omq_base[i] = NULL;
2102*4882a593Smuzhiyun 		}
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun }
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun /**
2107*4882a593Smuzhiyun  * tsi721_imsg_handler - Inbound Message Interrupt Handler
2108*4882a593Smuzhiyun  * @priv: pointer to tsi721 private data
2109*4882a593Smuzhiyun  * @ch: inbound message channel number to service
2110*4882a593Smuzhiyun  *
2111*4882a593Smuzhiyun  * Services channel interrupts from inbound messaging engine.
2112*4882a593Smuzhiyun  */
tsi721_imsg_handler(struct tsi721_device * priv,int ch)2113*4882a593Smuzhiyun static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
2114*4882a593Smuzhiyun {
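	/* Inbound mailboxes 0..3 are serviced by messaging channels 4..7 */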
2115*4882a593Smuzhiyun 	u32 mbox = ch - 4;
2116*4882a593Smuzhiyun 	u32 imsg_int;
2117*4882a593Smuzhiyun 	struct rio_mport *mport = &priv->mport;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	spin_lock(&priv->imsg_ring[mbox].lock);
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
2124*4882a593Smuzhiyun 		tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox);
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
2127*4882a593Smuzhiyun 		tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox);
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
2130*4882a593Smuzhiyun 		tsi_info(&priv->pdev->dev, "IB MBOX%d free queue low", mbox);
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 	/* Clear IB channel interrupts */
2133*4882a593Smuzhiyun 	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 	/* If an IB message was received, notify the upper layer */
2136*4882a593Smuzhiyun 	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
2137*4882a593Smuzhiyun 		mport->inb_msg[mbox].mcback)
2138*4882a593Smuzhiyun 		mport->inb_msg[mbox].mcback(mport,
2139*4882a593Smuzhiyun 				priv->imsg_ring[mbox].dev_id, mbox, -1);
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 	if (!(priv->flags & TSI721_USING_MSIX)) {
2142*4882a593Smuzhiyun 		u32 ch_inte;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 		/* Re-enable channel interrupts */
2145*4882a593Smuzhiyun 		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
2146*4882a593Smuzhiyun 		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
2147*4882a593Smuzhiyun 		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
2148*4882a593Smuzhiyun 	}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	spin_unlock(&priv->imsg_ring[mbox].lock);
2151*4882a593Smuzhiyun }
2152*4882a593Smuzhiyun 
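/*
 * Usage sketch (illustrative only): an inbound mailbox is normally set
 * up through the generic RapidIO API, e.g.:
 *
 *	rio_request_inb_mbox(mport, dev_id, 0, 32, inb_msg_cb);
 *
 * inb_msg_cb() is then called from tsi721_imsg_handler() when
 * TSI721_IBDMAC_INT_DQ_RCV signals a received message, and the client
 * drains the queue with rio_get_inb_message(mport, 0).
 */
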
2153*4882a593Smuzhiyun /**
2154*4882a593Smuzhiyun  * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
2155*4882a593Smuzhiyun  * @mport: Master port implementing the Inbound Messaging Engine
2156*4882a593Smuzhiyun  * @dev_id: Device specific pointer to pass on event
2157*4882a593Smuzhiyun  * @mbox: Mailbox to open
2158*4882a593Smuzhiyun  * @entries: Number of entries in the inbound mailbox ring
2159*4882a593Smuzhiyun  */
tsi721_open_inb_mbox(struct rio_mport * mport,void * dev_id,int mbox,int entries)2160*4882a593Smuzhiyun static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
2161*4882a593Smuzhiyun 				int mbox, int entries)
2162*4882a593Smuzhiyun {
2163*4882a593Smuzhiyun 	struct tsi721_device *priv = mport->priv;
2164*4882a593Smuzhiyun 	int ch = mbox + 4;
2165*4882a593Smuzhiyun 	int i;
2166*4882a593Smuzhiyun 	u64 *free_ptr;
2167*4882a593Smuzhiyun 	int rc = 0;
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
2170*4882a593Smuzhiyun 	    (entries > TSI721_IMSGD_RING_SIZE) ||
2171*4882a593Smuzhiyun 	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
2172*4882a593Smuzhiyun 		rc = -EINVAL;
2173*4882a593Smuzhiyun 		goto out;
2174*4882a593Smuzhiyun 	}
2175*4882a593Smuzhiyun 
	if ((mbox_sel & (1 << mbox)) == 0) {
		rc = -ENODEV;
		goto out;
	}

	/* Initialize IB Messaging Ring */
	priv->imsg_ring[mbox].dev_id = dev_id;
	priv->imsg_ring[mbox].size = entries;
	priv->imsg_ring[mbox].rx_slot = 0;
	priv->imsg_ring[mbox].desc_rdptr = 0;
	priv->imsg_ring[mbox].fq_wrptr = 0;
	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
		priv->imsg_ring[mbox].imq_base[i] = NULL;
	spin_lock_init(&priv->imsg_ring[mbox].lock);

	/* Allocate buffers for incoming messages */
	priv->imsg_ring[mbox].buf_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * TSI721_MSG_BUFFER_SIZE,
				   &priv->imsg_ring[mbox].buf_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].buf_base == NULL) {
		tsi_err(&priv->pdev->dev,
			"Failed to allocate buffers for IB MBOX%d", mbox);
		rc = -ENOMEM;
		goto out;
	}

	/* Allocate memory for circular free list */
	priv->imsg_ring[mbox].imfq_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * 8,
				   &priv->imsg_ring[mbox].imfq_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].imfq_base == NULL) {
		tsi_err(&priv->pdev->dev,
			"Failed to allocate free queue for IB MBOX%d", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	/* Allocate memory for Inbound message descriptors */
	priv->imsg_ring[mbox].imd_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * sizeof(struct tsi721_imsg_desc),
				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);

	if (priv->imsg_ring[mbox].imd_base == NULL) {
		tsi_err(&priv->pdev->dev,
			"Failed to allocate descriptor memory for IB MBOX%d",
			mbox);
		rc = -ENOMEM;
		goto out_dma;
	}

	/* Fill free buffer pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	for (i = 0; i < entries; i++)
		free_ptr[i] = cpu_to_le64(
				(u64)(priv->imsg_ring[mbox].buf_phys) +
				i * 0x1000);

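	/*
	 * Make sure the free-list entries land in memory before the
	 * messaging engine is pointed at them below.
	 */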
	mb();

	/*
	 * To map inbound SRIO messages to the appropriate queues, the
	 * Inbound Device ID register in the messaging engine must be set.
	 * This is done once, when the first inbound mailbox is requested.
	 */
	if (!(priv->flags & TSI721_IMSGID_SET)) {
		iowrite32((u32)priv->mport.host_deviceid,
			priv->regs + TSI721_IB_DEVID);
		priv->flags |= TSI721_IMSGID_SET;
	}

	/*
	 * Configure Inbound Messaging channel (ch = mbox + 4)
	 */

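	/*
	 * Queue base addresses are programmed as split high/low words and
	 * the ring size is encoded through TSI721_DMAC_DSSZ_SIZE().
	 */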
	/* Setup Inbound Message free queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
		priv->regs + TSI721_IBDMAC_FQBH(ch));
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
			TSI721_IBDMAC_FQBL_MASK),
		priv->regs + TSI721_IBDMAC_FQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_FQSZ(ch));

	/* Setup Inbound Message descriptor queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
		priv->regs + TSI721_IBDMAC_DQBH(ch));
	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
		   (u32)TSI721_IBDMAC_DQBL_MASK),
		priv->regs + TSI721_IBDMAC_DQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_DQSZ(ch));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		int idx = TSI721_VECT_IMB0_RCV + mbox;

		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
				 priv->msix[idx].irq_name, (void *)priv);

		if (rc) {
			tsi_debug(IMSG, &priv->pdev->dev,
				"Unable to get MSI-X IRQ for IBOX%d-DONE",
				mbox);
			goto out_desc;
		}

		idx = TSI721_VECT_IMB0_INT + mbox;
		rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
				 priv->msix[idx].irq_name, (void *)priv);

		if (rc) {
			tsi_debug(IMSG, &priv->pdev->dev,
				"Unable to get MSI-X IRQ for IBOX%d-INT", mbox);
			free_irq(
				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)priv);
			goto out_desc;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);

	/* Initialize Inbound Message Engine */
	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
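	/* Read back to flush the posted write before the settle delay */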
	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
	udelay(10);
	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));

	priv->imsg_init[mbox] = 1;
	return 0;

#ifdef CONFIG_PCI_MSI
out_desc:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_dma:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

out_buf:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

out:
	return rc;
}

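/*
 * Teardown mirrors tsi721_open_inb_mbox() in reverse order and is a
 * no-op for mailboxes that were never initialized.
 */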
/**
 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Mailbox to close
 */
static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int ch = mbox + 4;

	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
		return;
	priv->imsg_init[mbox] = 0;

	/* Disable Inbound Messaging Engine */

	/* Disable Interrupts */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)priv);
		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
				(void *)priv);
	}
#endif /* CONFIG_PCI_MSI */

	/* Clear Inbound Buffer Queue */
	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Free memory allocated for message buffers */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

	/* Free memory allocated for free pointer list */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

	/* Free memory allocated for RX descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
}

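/*
 * Usage sketch (illustrative only): a client normally replenishes the
 * inbound queue from its receive callback through the generic wrapper,
 * e.g. rio_add_inb_buffer(mport, 0, buf), where mailbox 0 is an
 * arbitrary example value.
 */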
/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Returns %0 on success or %-EINVAL if the current slot is already occupied.
 */
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int rc = 0;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
		tsi_err(&priv->pdev->dev,
			"Error adding inbound buffer %d, buffer exists",
			rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;

	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].rx_slot = 0;

out:
	return rc;
}

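/*
 * Usage sketch (illustrative only): a receive callback typically drains
 * the mailbox through the generic wrapper, e.g.:
 *
 *	while ((msg = rio_get_inb_message(mport, mbox)) != NULL)
 *		process(msg);
 *
 * where process() is a hypothetical consumer routine.
 */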
/**
 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 *
 * Returns pointer to the message on success or NULL on failure.
 */
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_imsg_desc *desc;
	u32 rx_slot;
	void *rx_virt = NULL;
	u64 rx_phys;
	void *buf = NULL;
	u64 *free_ptr;
	int ch = mbox + 4;
	int msg_size;

	if (!priv->imsg_init[mbox])
		return NULL;

	desc = priv->imsg_ring[mbox].imd_base;
	desc += priv->imsg_ring[mbox].desc_rdptr;

	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
		goto out;

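	/* Locate the next rx slot that has a client-supplied buffer */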
	rx_slot = priv->imsg_ring[mbox].rx_slot;
	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
		if (++rx_slot == priv->imsg_ring[mbox].size)
			rx_slot = 0;
	}

	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
			le32_to_cpu(desc->bufptr_lo);

	rx_virt = priv->imsg_ring[mbox].buf_base +
		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);

	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
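	/* A zero byte count in the descriptor denotes a maximum-size message */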
	if (msg_size == 0)
		msg_size = RIO_MAX_MSG_SIZE;

	memcpy(buf, rx_virt, msg_size);
	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].desc_rdptr = 0;

	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
		priv->regs + TSI721_IBDMAC_DQRP(ch));

	/* Return free buffer into the pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);

	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].fq_wrptr = 0;

	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
		priv->regs + TSI721_IBDMAC_FQWP(ch));
out:
	return buf;
}

/**
 * tsi721_messages_init - Initialization of Messaging Engine
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 messaging engine.
 */
static int tsi721_messages_init(struct tsi721_device *priv)
{
	int ch;

	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);

	/* Set SRIO Message Request/Response Timeout */
	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);

	/* Initialize Inbound Messaging Engine Registers */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
		/* Clear interrupt bits */
		iowrite32(TSI721_IBDMAC_INT_MASK,
			priv->regs + TSI721_IBDMAC_INT(ch));
		/* Clear Status */
		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));

		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
				priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
				priv->regs + TSI721_SMSG_ECC_NCOR(ch));
	}

	return 0;
}

/**
 * tsi721_query_mport - Fetch RapidIO master port attributes
 * @mport: Master port to query
 * @attr: mport attributes structure to fill in
 *
 * Reports link state, speed and width, and DMA capabilities.
 * Always returns %0.
 */
static int tsi721_query_mport(struct rio_mport *mport,
			      struct rio_mport_attr *attr)
{
	struct tsi721_device *priv = mport->priv;
	u32 rval;

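	/* Report link speed/width only while the port reports PORT_OK */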
	rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
	if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
		attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
		attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
	} else
		attr->link_speed = RIO_LINK_DOWN;

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG;
	attr->dma_max_sge = 0;
	attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT;
	attr->dma_align = 0;
#else
	attr->flags = 0;
#endif
	return 0;
}

/**
 * tsi721_disable_ints - disables all device interrupts
 * @priv: pointer to tsi721 private data
 */
static void tsi721_disable_ints(struct tsi721_device *priv)
{
	int ch;

	/* Disable all device level interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_INTE);

	/* Disable all Device Channel interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);

	/* Disable all Inbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));

	/* Disable all Outbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));

	/* Disable all general messaging interrupts */
	iowrite32(0, priv->regs + TSI721_SMSG_INTE);

	/* Disable all BDMA Channel interrupts */
	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
		iowrite32(0,
			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);

	/* Disable all general BDMA interrupts */
	iowrite32(0, priv->regs + TSI721_BDMA_INTE);

	/* Disable all SRIO Channel interrupts */
	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));

	/* Disable all general SR2PC interrupts */
	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);

	/* Disable all PC2SR interrupts */
	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);

	/* Disable all I2C interrupts */
	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);

	/* Disable SRIO MAC interrupts */
	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
}

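/*
 * Master port operations exported to the RapidIO core; the core invokes
 * these through the generic rio_* wrappers.
 */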
static struct rio_ops tsi721_rio_ops = {
	.lcread			= tsi721_lcread,
	.lcwrite		= tsi721_lcwrite,
	.cread			= tsi721_cread_dma,
	.cwrite			= tsi721_cwrite_dma,
	.dsend			= tsi721_dsend,
	.open_inb_mbox		= tsi721_open_inb_mbox,
	.close_inb_mbox		= tsi721_close_inb_mbox,
	.open_outb_mbox		= tsi721_open_outb_mbox,
	.close_outb_mbox	= tsi721_close_outb_mbox,
	.add_outb_message	= tsi721_add_outb_message,
	.add_inb_buffer		= tsi721_add_inb_buffer,
	.get_inb_message	= tsi721_get_inb_message,
	.map_inb		= tsi721_rio_map_inb_mem,
	.unmap_inb		= tsi721_rio_unmap_inb_mem,
	.pwenable		= tsi721_pw_enable,
	.query_mport		= tsi721_query_mport,
	.map_outb		= tsi721_map_outb_win,
	.unmap_outb		= tsi721_unmap_outb_win,
};

static void tsi721_mport_release(struct device *dev)
{
	struct rio_mport *mport = to_rio_mport(dev);

	tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id);
}

/**
 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 as RapidIO master port.
 * Returns %0 on success or a negative error code on failure.
 */
static int tsi721_setup_mport(struct tsi721_device *priv)
{
	struct pci_dev *pdev = priv->pdev;
	int err = 0;
	struct rio_mport *mport = &priv->mport;

	err = rio_mport_initialize(mport);
	if (err)
		return err;

	mport->ops = &tsi721_rio_ops;
	mport->index = 0;
	mport->sys_size = 0; /* small system */
	mport->priv = (void *)priv;
	mport->phys_efptr = 0x100;
	mport->phys_rmap = 1;
	mport->dev.parent = &pdev->dev;
	mport->dev.release = tsi721_mport_release;

	INIT_LIST_HEAD(&mport->dbells);

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
	snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)",
		 dev_driver_string(&pdev->dev), dev_name(&pdev->dev));

	/* Hook up interrupt handler */

#ifdef CONFIG_PCI_MSI
	if (!tsi721_enable_msix(priv))
		priv->flags |= TSI721_USING_MSIX;
	else if (!pci_enable_msi(pdev))
		priv->flags |= TSI721_USING_MSI;
	else
		tsi_debug(MPORT, &pdev->dev,
			 "MSI/MSI-X is not available. Using legacy INTx.");
#endif /* CONFIG_PCI_MSI */

	err = tsi721_request_irq(priv);

	if (err) {
		tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)",
			pdev->irq, err);
		return err;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	err = tsi721_register_dma(priv);
	if (err)
		goto err_exit;
#endif
	/* Enable SRIO link */
	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
		  TSI721_DEVCTL_SRBOOT_CMPL,
		  priv->regs + TSI721_DEVCTL);

	if (mport->host_deviceid >= 0)
		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
			  RIO_PORT_GEN_DISCOVERED,
			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
	else
		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));

	err = rio_register_mport(mport);
	if (err) {
		tsi721_unregister_dma(priv);
		goto err_exit;
	}

	return 0;

err_exit:
	tsi721_free_irq(priv);
	return err;
}

static int tsi721_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct tsi721_device *priv;
	int err;

	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto err_exit;
	}

	err = pci_enable_device(pdev);
	if (err) {
		tsi_err(&pdev->dev, "Failed to enable PCI device");
		goto err_clean;
	}

	priv->pdev = pdev;

#ifdef DEBUG
	{
		int i;

		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			tsi_debug(INIT, &pdev->dev, "res%d %pR",
				  i, &pdev->resource[i]);
		}
	}
#endif
	/*
	 * Verify BAR configuration
	 */

	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
		tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
		tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	/*
	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
	 * space.
	 * NOTE: BAR_2 and BAR_4 are not used by this version of driver.
	 * It may be a good idea to keep them disabled using HW configuration
	 * to save PCI memory space.
	 */

	priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0;

	if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) {
		if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH)
			tsi_debug(INIT, &pdev->dev,
				 "Prefetchable OBW BAR2 will not be used");
		else {
			priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2);
			priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2);
		}
	}

	if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) {
		if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH)
			tsi_debug(INIT, &pdev->dev,
				 "Prefetchable OBW BAR4 will not be used");
		else {
			priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4);
			priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4);
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		tsi_err(&pdev->dev, "Unable to obtain PCI resources");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	priv->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!priv->regs) {
		tsi_err(&pdev->dev, "Unable to map device registers space");
		err = -ENOMEM;
		goto err_free_res;
	}

	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
	if (!priv->odb_base) {
		tsi_err(&pdev->dev, "Unable to map outbound doorbells space");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			tsi_err(&pdev->dev, "Unable to set DMA mask");
			goto err_unmap_bars;
		}

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
	}

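	/* The pcie_capability_* accessors below require a PCIe device */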
	BUG_ON(!pci_is_pcie(pdev));

	/* Clear "no snoop" and "relaxed ordering" bits. */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);

	/* Override PCIe Maximum Read Request Size setting if requested */
	if (pcie_mrrs >= 0) {
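		/* DEVCTL[14:12]: values 0..5 encode 128B..4096B read requests */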
		if (pcie_mrrs <= 5)
			pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
		else
			tsi_info(&pdev->dev,
				 "Invalid MRRS override value %d", pcie_mrrs);
	}

	/* Set PCIe completion timeout to 1-10ms */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);

	/*
	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
	 */
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
						TSI721_MSIXTBL_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
						TSI721_MSIXPBA_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
	/* End of FIXUP */

	tsi721_disable_ints(priv);

	tsi721_init_pc2sr_mapping(priv);
	tsi721_init_sr2pc_mapping(priv);

	if (tsi721_bdma_maint_init(priv)) {
		tsi_err(&pdev->dev, "BDMA initialization failed");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	err = tsi721_doorbell_init(priv);
	if (err)
		goto err_free_bdma;

	tsi721_port_write_init(priv);

	err = tsi721_messages_init(priv);
	if (err)
		goto err_free_consistent;

	err = tsi721_setup_mport(priv);
	if (err)
		goto err_free_consistent;

	pci_set_drvdata(pdev, priv);
	tsi721_interrupts_init(priv);

	return 0;

err_free_consistent:
	tsi721_port_write_free(priv);
	tsi721_doorbell_free(priv);
err_free_bdma:
	tsi721_bdma_maint_free(priv);
err_unmap_bars:
	if (priv->regs)
		iounmap(priv->regs);
	if (priv->odb_base)
		iounmap(priv->odb_base);
err_free_res:
	pci_release_regions(pdev);
	pci_clear_master(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_clean:
	kfree(priv);
err_exit:
	return err;
}

static void tsi721_remove(struct pci_dev *pdev)
{
	struct tsi721_device *priv = pci_get_drvdata(pdev);

	tsi_debug(EXIT, &pdev->dev, "enter");

	tsi721_disable_ints(priv);
	tsi721_free_irq(priv);
	flush_scheduled_work();
	rio_unregister_mport(&priv->mport);

	tsi721_unregister_dma(priv);
	tsi721_bdma_maint_free(priv);
	tsi721_doorbell_free(priv);
	tsi721_port_write_free(priv);
	tsi721_close_sr2pc_mapping(priv);

	if (priv->regs)
		iounmap(priv->regs);
	if (priv->odb_base)
		iounmap(priv->odb_base);
#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX)
		pci_disable_msix(priv->pdev);
	else if (priv->flags & TSI721_USING_MSI)
		pci_disable_msi(priv->pdev);
#endif
	pci_release_regions(pdev);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(priv);
	tsi_debug(EXIT, &pdev->dev, "exit");
}

static void tsi721_shutdown(struct pci_dev *pdev)
{
	struct tsi721_device *priv = pci_get_drvdata(pdev);

	tsi_debug(EXIT, &pdev->dev, "enter");

	tsi721_disable_ints(priv);
	tsi721_dma_stop_all(priv);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id tsi721_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);

static struct pci_driver tsi721_driver = {
	.name		= "tsi721",
	.id_table	= tsi721_pci_tbl,
	.probe		= tsi721_probe,
	.remove		= tsi721_remove,
	.shutdown	= tsi721_shutdown,
};

module_pci_driver(tsi721_driver);

MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");