xref: /OK3568_Linux_fs/kernel/drivers/media/pci/b2c2/flexcop-pci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III
4*4882a593Smuzhiyun  * flexcop-pci.c - covers the PCI part including DMA transfers
5*4882a593Smuzhiyun  * see flexcop.c for copyright information
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

/* 1 = use the FlexCop's hardware PID filter (default),
 * 0 = pass the complete transport stream to the software demux */
static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (fullts), 1");

/* streaming-watchdog interval in ms; clamped to a minimum of 100 ms when
 * the delayed work is (re)scheduled, <= 0 disables scheduling at probe time.
 * Note: writable at runtime (perm 0644). */
static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ streaming watchdog.");

#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level,args...) \
	do { if ((debug & level)) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level,args...)
#define DEBSTATUS " (debugging is not enabled)"
#endif

/* debug categories, OR-able bits of the 'debug' module parameter */
#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...) dprintk(0x02, args)
#define deb_ts(args...) dprintk(0x04, args)
#define deb_irq(args...) dprintk(0x08, args)
#define deb_chk(args...) dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
44*4882a593Smuzhiyun 
/* per-device state for the PCI bus glue of the flexcop core */
struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT     0x01
#define FC_PCI_DMA_INIT 0x02
	/* bitmask of FC_PCI_* stages reached; consulted during teardown */
	int init_state;

	void __iomem *io_mem;	/* BAR0 mapping (0x800 bytes) */
	u32 irq;
	/* buffersize (at least for DMA1, need to be % 188 == 0,
	 * this logic is required */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
	struct flexcop_dma dma[2];	/* [0] = DMA1 (TS), [1] = DMA2 */

	int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos;
	/* position of the pointer last time the timer/packet irq occurred */
	int count;		/* valid IRQs handled so far */
	int count_prev;		/* 'count' at the previous watchdog check */
	int stream_problem;	/* consecutive watchdog checks without progress */

	spinlock_t irq_lock;	/* serializes the interrupt handler */
	unsigned long last_irq;	/* jiffies of the last timer IRQ (debug only) */

	struct delayed_work irq_check_work;	/* self-rescheduling watchdog */
	struct flexcop_device *fc_dev;		/* back-pointer to the core */
};
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun static int lastwreg, lastwval, lastrreg, lastrval;
75*4882a593Smuzhiyun 
flexcop_pci_read_ibi_reg(struct flexcop_device * fc,flexcop_ibi_register r)76*4882a593Smuzhiyun static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
77*4882a593Smuzhiyun 		flexcop_ibi_register r)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	struct flexcop_pci *fc_pci = fc->bus_specific;
80*4882a593Smuzhiyun 	flexcop_ibi_value v;
81*4882a593Smuzhiyun 	v.raw = readl(fc_pci->io_mem + r);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	if (lastrreg != r || lastrval != v.raw) {
84*4882a593Smuzhiyun 		lastrreg = r; lastrval = v.raw;
85*4882a593Smuzhiyun 		deb_reg("new rd: %3x: %08x\n", r, v.raw);
86*4882a593Smuzhiyun 	}
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	return v;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
/* write an IBI register over PCI MMIO; logs the access when it differs
 * from the previous write (debug-dedup via lastwreg/lastwval) */
static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r, flexcop_ibi_value v)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (!(lastwreg == r && lastwval == v.raw)) {
		lastwreg = r;
		lastwval = v.raw;
		deb_reg("new wr: %3x: %08x\n", r, v.raw);
	}

	writel(v.raw, fc_pci->io_mem + r);
	return 0;
}
104*4882a593Smuzhiyun 
/* Streaming watchdog (self-rescheduling delayed work, runs every
 * irq_chk_intv ms, minimum 100 ms). While feeds are active, if the IRQ
 * counter has not advanced for 4 consecutive checks, the hardware PID
 * filter is reset by cycling every active feed off and on again. */
static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {

		if (fc_pci->count == fc_pci->count_prev) {
			deb_chk("no IRQ since the last check\n");
			/* act only on the 4th stalled check (counts 0..3) */
			if (fc_pci->stream_problem++ == 3) {
				struct dvb_demux_feed *feed;
				deb_info("flexcop-pci: stream problem, resetting pid filter\n");

				/* hold the demux lock across both passes so
				 * the feed list cannot change in between */
				spin_lock_irq(&fc->demux.lock);
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 0);
				}

				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 1);
				}
				spin_unlock_irq(&fc->demux.lock);

				fc_pci->stream_problem = 0;
			}
		} else {
			/* progress was made - take a fresh baseline */
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	/* reschedule ourselves; interval clamped to at least 100 ms */
	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}
142*4882a593Smuzhiyun 
/* When PID filtering is turned on, we use the timer IRQ, because small amounts
 * of data need to be passed to the user space instantly as well. When PID
 * filtering is turned off, we use the page-change-IRQ */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	/* shared IRQ line: read the status register under the lock and
	 * decide below whether this interrupt was actually ours */
	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid irq took place so far\n", fc_pci->count);

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		/* page-change IRQ: one half of DMA1's double buffer just
		 * filled up; pass that completed half (whole 188-byte TS
		 * packets) to the demux and flip the active half */
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n",!fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
		/* for the timer IRQ we only can use buffer dmx feeding, because we don't have
		 * complete TS packets when reading from the DMA memory */
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		/* the register seems to hold the address in 4-byte units,
		 * hence the << 2 - assumption from the shift, confirm
		 * against the FlexCop datasheet */
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
		/* sanity check: the hardware pointer must lie within the
		 * double buffer, otherwise ignore this interrupt */
		if (cur_pos > fc_pci->dma[0].size * 2)
			goto error;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
				jiffies_to_usecs(jiffies - fc_pci->last_irq),
				v.raw, (unsigned long long)cur_addr, cur_pos,
				fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* buffer end was reached, restarted from the beginning
		 * pass the data from last_cur_pos to the buffer end to the demux
		 */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			deb_irq(" end was reached: passing %d bytes ",
				(fc_pci->dma[0].size*2 - 1) -
				fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				(fc_pci->dma[0].size*2) -
					fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		/* pass the newly arrived span since the last timer IRQ */
		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
				cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;	/* watchdog progress indicator */
	} else {
		/* not our interrupt (shared line) */
		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
			v.raw);
		ret = IRQ_NONE;
	}

error:
	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}
234*4882a593Smuzhiyun 
/* core callback to start/stop the TS transfer: (re)configures both DMA
 * channels and enables/disables the DMA1 timer IRQ. The call order is
 * hardware-relevant, so it is kept exactly as is. */
static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		flexcop_dma_config_timer(fc, FC_DMA_1, 0);
		/* start transfers on both halves of DMA1's double buffer */
		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
		deb_irq("DMA xfer enabled\n");

		fc_pci->last_dma1_cur_pos = 0;
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
		deb_irq("IRQ enabled\n");
		/* fresh baseline for the streaming watchdog */
		fc_pci->count_prev = fc_pci->count;
	} else {
		/* disable the IRQ before stopping the transfer */
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
		deb_irq("IRQ disabled\n");

		flexcop_dma_xfer_control(fc, FC_DMA_1,
			 FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
		deb_irq("DMA xfer disabled\n");
	}
	return 0;
}
260*4882a593Smuzhiyun 
flexcop_pci_dma_init(struct flexcop_pci * fc_pci)261*4882a593Smuzhiyun static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
262*4882a593Smuzhiyun {
263*4882a593Smuzhiyun 	int ret;
264*4882a593Smuzhiyun 	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
265*4882a593Smuzhiyun 			FC_DEFAULT_DMA1_BUFSIZE);
266*4882a593Smuzhiyun 	if (ret != 0)
267*4882a593Smuzhiyun 		return ret;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
270*4882a593Smuzhiyun 			FC_DEFAULT_DMA2_BUFSIZE);
271*4882a593Smuzhiyun 	if (ret != 0) {
272*4882a593Smuzhiyun 		flexcop_dma_free(&fc_pci->dma[0]);
273*4882a593Smuzhiyun 		return ret;
274*4882a593Smuzhiyun 	}
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
277*4882a593Smuzhiyun 			FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
278*4882a593Smuzhiyun 	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
279*4882a593Smuzhiyun 			FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);
280*4882a593Smuzhiyun 	fc_pci->init_state |= FC_PCI_DMA_INIT;
281*4882a593Smuzhiyun 	return ret;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun 
flexcop_pci_dma_exit(struct flexcop_pci * fc_pci)284*4882a593Smuzhiyun static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	if (fc_pci->init_state & FC_PCI_DMA_INIT) {
287*4882a593Smuzhiyun 		flexcop_dma_free(&fc_pci->dma[0]);
288*4882a593Smuzhiyun 		flexcop_dma_free(&fc_pci->dma[1]);
289*4882a593Smuzhiyun 	}
290*4882a593Smuzhiyun 	fc_pci->init_state &= ~FC_PCI_DMA_INIT;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
flexcop_pci_init(struct flexcop_pci * fc_pci)293*4882a593Smuzhiyun static int flexcop_pci_init(struct flexcop_pci *fc_pci)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	int ret;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	info("card revision %x", fc_pci->pdev->revision);
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
300*4882a593Smuzhiyun 		return ret;
301*4882a593Smuzhiyun 	pci_set_master(fc_pci->pdev);
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
304*4882a593Smuzhiyun 		goto err_pci_disable_device;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	if (!fc_pci->io_mem) {
309*4882a593Smuzhiyun 		err("cannot map io memory\n");
310*4882a593Smuzhiyun 		ret = -EIO;
311*4882a593Smuzhiyun 		goto err_pci_release_regions;
312*4882a593Smuzhiyun 	}
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	pci_set_drvdata(fc_pci->pdev, fc_pci);
315*4882a593Smuzhiyun 	spin_lock_init(&fc_pci->irq_lock);
316*4882a593Smuzhiyun 	if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
317*4882a593Smuzhiyun 					IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
318*4882a593Smuzhiyun 		goto err_pci_iounmap;
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	fc_pci->init_state |= FC_PCI_INIT;
321*4882a593Smuzhiyun 	return ret;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun err_pci_iounmap:
324*4882a593Smuzhiyun 	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
325*4882a593Smuzhiyun err_pci_release_regions:
326*4882a593Smuzhiyun 	pci_release_regions(fc_pci->pdev);
327*4882a593Smuzhiyun err_pci_disable_device:
328*4882a593Smuzhiyun 	pci_disable_device(fc_pci->pdev);
329*4882a593Smuzhiyun 	return ret;
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun 
flexcop_pci_exit(struct flexcop_pci * fc_pci)332*4882a593Smuzhiyun static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	if (fc_pci->init_state & FC_PCI_INIT) {
335*4882a593Smuzhiyun 		free_irq(fc_pci->pdev->irq, fc_pci);
336*4882a593Smuzhiyun 		pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
337*4882a593Smuzhiyun 		pci_release_regions(fc_pci->pdev);
338*4882a593Smuzhiyun 		pci_disable_device(fc_pci->pdev);
339*4882a593Smuzhiyun 	}
340*4882a593Smuzhiyun 	fc_pci->init_state &= ~FC_PCI_INIT;
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
/* probe: allocate the flexcop device, wire up the PCI bus callbacks,
 * initialize PCI resources, the flexcop core and the DMA buffers, then
 * start the streaming watchdog. Each stage is unwound via the goto chain
 * on failure. Returns 0 or a negative errno. */
static int flexcop_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct flexcop_device *fc;
	struct flexcop_pci *fc_pci;
	int ret = -ENOMEM;

	/* allocates the core device plus our bus-specific struct in one go */
	if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
		err("out of memory\n");
		return -ENOMEM;
	}

	/* general flexcop init */
	fc_pci = fc->bus_specific;
	fc_pci->fc_dev = fc;

	fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
	fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
	fc->i2c_request = flexcop_i2c_request;
	fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
	fc->stream_control = flexcop_pci_stream_control;

	if (enable_pid_filtering)
		info("will use the HW PID filter.");
	else
		info("will pass the complete TS to the demuxer.");

	fc->pid_filtering = enable_pid_filtering;
	fc->bus_type = FC_PCI;
	fc->dev = &pdev->dev;
	fc->owner = THIS_MODULE;

	/* bus specific part */
	fc_pci->pdev = pdev;
	if ((ret = flexcop_pci_init(fc_pci)) != 0)
		goto err_kfree;

	/* init flexcop */
	if ((ret = flexcop_device_initialize(fc)) != 0)
		goto err_pci_exit;

	/* init dma */
	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
		goto err_fc_exit;

	/* streaming watchdog; reschedules itself once kicked off */
	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);

	if (irq_chk_intv > 0)
		schedule_delayed_work(&fc_pci->irq_check_work,
				msecs_to_jiffies(irq_chk_intv < 100 ?
					100 :
					irq_chk_intv));
	return ret;

err_fc_exit:
	flexcop_device_exit(fc);
err_pci_exit:
	flexcop_pci_exit(fc_pci);
err_kfree:
	flexcop_device_kfree(fc);
	return ret;
}
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun /* in theory every _exit function should be called exactly two times,
407*4882a593Smuzhiyun  * here and in the bail-out-part of the _init-function
408*4882a593Smuzhiyun  */
flexcop_pci_remove(struct pci_dev * pdev)409*4882a593Smuzhiyun static void flexcop_pci_remove(struct pci_dev *pdev)
410*4882a593Smuzhiyun {
411*4882a593Smuzhiyun 	struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	if (irq_chk_intv > 0)
414*4882a593Smuzhiyun 		cancel_delayed_work(&fc_pci->irq_check_work);
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	flexcop_pci_dma_exit(fc_pci);
417*4882a593Smuzhiyun 	flexcop_device_exit(fc_pci->fc_dev);
418*4882a593Smuzhiyun 	flexcop_pci_exit(fc_pci);
419*4882a593Smuzhiyun 	flexcop_device_kfree(fc_pci->fc_dev);
420*4882a593Smuzhiyun }
421*4882a593Smuzhiyun 
/* PCI ID table: the B2C2-based PCI cards handled here share this single
 * vendor/device ID (0x13d0:0x2103) */
static const struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	{ },	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);
428*4882a593Smuzhiyun 
/* PCI driver glue; module_pci_driver() generates module init/exit */
static struct pci_driver flexcop_pci_driver = {
	.name     = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe    = flexcop_pci_probe,
	.remove   = flexcop_pci_remove,
};

module_pci_driver(flexcop_pci_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");
441