// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pluto2.c - Satelco Easywatch Mobile Terrestrial Receiver [DVB-T]
 *
 * Copyright (C) 2005 Andreas Oberritter <obi@linuxtv.org>
 *
 * based on pluto2.c 1.10 - http://instinct-wp8.no-ip.org/pluto/
 * by Dany Salman <salmandany@yahoo.fr>
 * Copyright (c) 2004 TDF
 */

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <media/demux.h>
#include <media/dmxdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
#include <media/dvbdev.h>
#include "tda1004x.h"

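/* DVB_DEFINE_MOD_OPT_ADAPTER_NR() provides the standard "adapter_nr" module
 * option so users can pin the DVB adapter number assigned to this card. */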
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

#define DRIVER_NAME		"pluto2"

#define REG_PIDn(n)		((n) << 2)	/* PID n pattern registers */
#define REG_PCAR		0x0020		/* PC address register */
#define REG_TSCR		0x0024		/* TS ctrl & status */
#define REG_MISC		0x0028		/* miscellaneous */
#define REG_MMAC		0x002c		/* MSB MAC address */
#define REG_IMAC		0x0030		/* ISB MAC address */
#define REG_LMAC		0x0034		/* LSB MAC address */
#define REG_SPID		0x0038		/* SPI data */
#define REG_SLCS		0x003c		/* serial links ctrl/status */

#define PID0_NOFIL		(0x0001 << 16)
#define PIDn_ENP		(0x0001 << 15)
#define PID0_END		(0x0001 << 14)
#define PID0_AFIL		(0x0001 << 13)
#define PIDn_PID		(0x1fff <<  0)

#define TSCR_NBPACKETS		(0x00ff << 24)
#define TSCR_DEM		(0x0001 << 17)
#define TSCR_DE			(0x0001 << 16)
#define TSCR_RSTN		(0x0001 << 15)
#define TSCR_MSKO		(0x0001 << 14)
#define TSCR_MSKA		(0x0001 << 13)
#define TSCR_MSKL		(0x0001 << 12)
#define TSCR_OVR		(0x0001 << 11)
#define TSCR_AFUL		(0x0001 << 10)
#define TSCR_LOCK		(0x0001 <<  9)
#define TSCR_IACK		(0x0001 <<  8)
#define TSCR_ADEF		(0x007f <<  0)

#define MISC_DVR		(0x0fff <<  4)
#define MISC_ALED		(0x0001 <<  3)
#define MISC_FRST		(0x0001 <<  2)
#define MISC_LED1		(0x0001 <<  1)
#define MISC_LED0		(0x0001 <<  0)

#define SPID_SPIDR		(0x00ff <<  0)

#define SLCS_SCL		(0x0001 <<  7)
#define SLCS_SDA		(0x0001 <<  6)
#define SLCS_CSN		(0x0001 <<  2)
#define SLCS_OVR		(0x0001 <<  1)
#define SLCS_SWC		(0x0001 <<  0)

#define TS_DMA_PACKETS		(8)
#define TS_DMA_BYTES		(188 * TS_DMA_PACKETS)

#define I2C_ADDR_TDA10046	0x10
#define I2C_ADDR_TUA6034	0xc2
#define NHWFILTERS		8
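
/* The eight hardware PID filters live in REG_PIDn(0)..REG_PIDn(7); feeds with
 * f->index < NHWFILTERS are programmed there (see pluto_start_feed()), while
 * any other feed falls back to passing the full TS to the software demux. */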

struct pluto {
	/* pci */
	struct pci_dev *pdev;
	u8 __iomem *io_mem;

	/* dvb */
	struct dmx_frontend hw_frontend;
	struct dmx_frontend mem_frontend;
	struct dmxdev dmxdev;
	struct dvb_adapter dvb_adapter;
	struct dvb_demux demux;
	struct dvb_frontend *fe;
	struct dvb_net dvbnet;
	unsigned int full_ts_users;
	unsigned int users;

	/* i2c */
	struct i2c_algo_bit_data i2c_bit;
	struct i2c_adapter i2c_adap;
	unsigned int i2cbug;

	/* irq */
	unsigned int overflow;
	unsigned int dead;

	/* dma */
	dma_addr_t dma_addr;
	u8 dma_buf[TS_DMA_BYTES];
	u8 dummy[4096];
};

static inline struct pluto *feed_to_pluto(struct dvb_demux_feed *feed)
{
	return container_of(feed->demux, struct pluto, demux);
}

static inline struct pluto *frontend_to_pluto(struct dvb_frontend *fe)
{
	return container_of(fe->dvb, struct pluto, dvb_adapter);
}

static inline u32 pluto_readreg(struct pluto *pluto, u32 reg)
{
	return readl(&pluto->io_mem[reg]);
}

static inline void pluto_writereg(struct pluto *pluto, u32 reg, u32 val)
{
	writel(val, &pluto->io_mem[reg]);
}

static inline void pluto_rw(struct pluto *pluto, u32 reg, u32 mask, u32 bits)
{
	u32 val = readl(&pluto->io_mem[reg]);
	val &= ~mask;
	val |= bits;
	writel(val, &pluto->io_mem[reg]);
}

static void pluto_write_tscr(struct pluto *pluto, u32 val)
{
	/* set the number of packets */
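	/* With TS_DMA_PACKETS == 8 this programs ADEF to 4, i.e. half the DMA
	 * ring; presumably the DMA-end interrupt then fires around the
	 * half-full mark (assumption - the register is not documented here). */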
	val &= ~TSCR_ADEF;
	val |= TS_DMA_PACKETS / 2;

	pluto_writereg(pluto, REG_TSCR, val);
}

static void pluto_setsda(void *data, int state)
{
	struct pluto *pluto = data;

	if (state)
		pluto_rw(pluto, REG_SLCS, SLCS_SDA, SLCS_SDA);
	else
		pluto_rw(pluto, REG_SLCS, SLCS_SDA, 0);
}

static void pluto_setscl(void *data, int state)
{
	struct pluto *pluto = data;

	if (state)
		pluto_rw(pluto, REG_SLCS, SLCS_SCL, SLCS_SCL);
	else
		pluto_rw(pluto, REG_SLCS, SLCS_SCL, 0);

	/* try to detect i2c_inb() to workaround hardware bug:
	 * reset SDA to high after SCL has been set to low */
	if ((state) && (pluto->i2cbug == 0)) {
		pluto->i2cbug = 1;
	} else {
		if ((!state) && (pluto->i2cbug == 1))
			pluto_setsda(pluto, 1);
		pluto->i2cbug = 0;
	}
}

static int pluto_getsda(void *data)
{
	struct pluto *pluto = data;

	return pluto_readreg(pluto, REG_SLCS) & SLCS_SDA;
}

static int pluto_getscl(void *data)
{
	struct pluto *pluto = data;

	return pluto_readreg(pluto, REG_SLCS) & SLCS_SCL;
}

static void pluto_reset_frontend(struct pluto *pluto, int reenable)
{
	u32 val = pluto_readreg(pluto, REG_MISC);

	if (val & MISC_FRST) {
		val &= ~MISC_FRST;
		pluto_writereg(pluto, REG_MISC, val);
	}
	if (reenable) {
		val |= MISC_FRST;
		pluto_writereg(pluto, REG_MISC, val);
	}
}

static void pluto_reset_ts(struct pluto *pluto, int reenable)
{
	u32 val = pluto_readreg(pluto, REG_TSCR);

	if (val & TSCR_RSTN) {
		val &= ~TSCR_RSTN;
		pluto_write_tscr(pluto, val);
	}
	if (reenable) {
		val |= TSCR_RSTN;
		pluto_write_tscr(pluto, val);
	}
}

static void pluto_set_dma_addr(struct pluto *pluto)
{
	pluto_writereg(pluto, REG_PCAR, pluto->dma_addr);
}

static int pluto_dma_map(struct pluto *pluto)
{
	pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);

	return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
}

static void pluto_dma_unmap(struct pluto *pluto)
{
	pci_unmap_single(pluto->pdev, pluto->dma_addr,
			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
}

static int pluto_start_feed(struct dvb_demux_feed *f)
{
	struct pluto *pluto = feed_to_pluto(f);

	/* enable PID filtering */
	if (pluto->users++ == 0)
		pluto_rw(pluto, REG_PIDn(0), PID0_AFIL | PID0_NOFIL, 0);

	if ((f->pid < 0x2000) && (f->index < NHWFILTERS))
		pluto_rw(pluto, REG_PIDn(f->index), PIDn_ENP | PIDn_PID, PIDn_ENP | f->pid);
	else if (pluto->full_ts_users++ == 0)
		pluto_rw(pluto, REG_PIDn(0), PID0_NOFIL, PID0_NOFIL);

	return 0;
}

static int pluto_stop_feed(struct dvb_demux_feed *f)
{
	struct pluto *pluto = feed_to_pluto(f);

	/* disable PID filtering */
	if (--pluto->users == 0)
		pluto_rw(pluto, REG_PIDn(0), PID0_AFIL, PID0_AFIL);

	if ((f->pid < 0x2000) && (f->index < NHWFILTERS))
		pluto_rw(pluto, REG_PIDn(f->index), PIDn_ENP | PIDn_PID, 0x1fff);
	else if (--pluto->full_ts_users == 0)
		pluto_rw(pluto, REG_PIDn(0), PID0_NOFIL, 0);

	return 0;
}

static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
{
	/* synchronize the DMA transfer with the CPU
	 * first so that we see updated contents. */
	pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr,
			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);

	/* Workaround for broken hardware:
	 * [1] On startup NBPACKETS seems to contain an uninitialized value,
	 *     but no packets have been transferred.
	 * [2] Sometimes (actually very often) NBPACKETS stays at zero
	 *     although one packet has been transferred.
	 * [3] Sometimes (actually rarely), the card gets into an erroneous
	 *     mode where it continuously generates interrupts, claiming it
	 *     has received nbpackets > TS_DMA_PACKETS packets, but no packet
	 *     has been transferred. Only a reset seems to solve this
	 */
	if ((nbpackets == 0) || (nbpackets > TS_DMA_PACKETS)) {
		unsigned int i = 0;
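		/* 0x47 is the MPEG-TS sync byte: count how many complete
		 * 188-byte packets actually made it into the buffer. */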
		while (pluto->dma_buf[i] == 0x47)
			i += 188;
		nbpackets = i / 188;
		if (i == 0) {
			pluto_reset_ts(pluto, 1);
			dev_printk(KERN_DEBUG, &pluto->pdev->dev, "resetting TS because of invalid packet counter\n");
		}
	}

	dvb_dmx_swfilter_packets(&pluto->demux, pluto->dma_buf, nbpackets);

	/* clear the dma buffer. this is needed to be able to identify
	 * new valid ts packets above */
	memset(pluto->dma_buf, 0, nbpackets * 188);

	/* reset the dma address */
	pluto_set_dma_addr(pluto);

	/* sync the buffer and give it back to the card */
	pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr,
			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
}

static irqreturn_t pluto_irq(int irq, void *dev_id)
{
	struct pluto *pluto = dev_id;
	u32 tscr;

	/* check whether an interrupt occurred on this device */
	tscr = pluto_readreg(pluto, REG_TSCR);
	if (!(tscr & (TSCR_DE | TSCR_OVR)))
		return IRQ_NONE;

	if (tscr == 0xffffffff) {
		if (pluto->dead == 0)
			dev_err(&pluto->pdev->dev, "card has hung or been ejected.\n");
		/* It's dead Jim */
		pluto->dead = 1;
		return IRQ_HANDLED;
	}

	/* dma end interrupt */
	if (tscr & TSCR_DE) {
		pluto_dma_end(pluto, (tscr & TSCR_NBPACKETS) >> 24);
		/* overflow interrupt */
		if (tscr & TSCR_OVR)
			pluto->overflow++;
		if (pluto->overflow) {
			dev_err(&pluto->pdev->dev, "overflow irq (%d)\n",
				pluto->overflow);
			pluto_reset_ts(pluto, 1);
			pluto->overflow = 0;
		}
	} else if (tscr & TSCR_OVR) {
		pluto->overflow++;
	}

	/* ACK the interrupt */
	pluto_write_tscr(pluto, tscr | TSCR_IACK);

	return IRQ_HANDLED;
}

static void pluto_enable_irqs(struct pluto *pluto)
{
	u32 val = pluto_readreg(pluto, REG_TSCR);

	/* disable AFUL and LOCK interrupts */
	val |= (TSCR_MSKA | TSCR_MSKL);
	/* enable DMA and OVERFLOW interrupts */
	val &= ~(TSCR_DEM | TSCR_MSKO);
	/* clear pending interrupts */
	val |= TSCR_IACK;

	pluto_write_tscr(pluto, val);
}

static void pluto_disable_irqs(struct pluto *pluto)
{
	u32 val = pluto_readreg(pluto, REG_TSCR);

	/* disable all interrupts */
	val |= (TSCR_DEM | TSCR_MSKO | TSCR_MSKA | TSCR_MSKL);
	/* clear pending interrupts */
	val |= TSCR_IACK;

	pluto_write_tscr(pluto, val);
}

static int pluto_hw_init(struct pluto *pluto)
{
	pluto_reset_frontend(pluto, 1);

	/* set automatic LED control by FPGA */
	pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED);

	/* set data endianness */
#ifdef __LITTLE_ENDIAN
	pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END);
#else
	pluto_rw(pluto, REG_PIDn(0), PID0_END, 0);
#endif
	/* map DMA and set address */
	pluto_dma_map(pluto);
	pluto_set_dma_addr(pluto);

	/* enable interrupts */
	pluto_enable_irqs(pluto);

	/* reset TS logic */
	pluto_reset_ts(pluto, 1);

	return 0;
}

static void pluto_hw_exit(struct pluto *pluto)
{
	/* disable interrupts */
	pluto_disable_irqs(pluto);

	pluto_reset_ts(pluto, 0);

	/* LED: disable automatic control, enable yellow, disable green */
	pluto_rw(pluto, REG_MISC, MISC_ALED | MISC_LED1 | MISC_LED0, MISC_LED1);

	/* unmap DMA */
	pluto_dma_unmap(pluto);

	pluto_reset_frontend(pluto, 0);
}

static inline u32 divide(u32 numerator, u32 denominator)
{
	if (denominator == 0)
		return ~0;

	return DIV_ROUND_CLOSEST(numerator, denominator);
}

/* LG Innotek TDTE-E001P (Infineon TUA6034) */
static int lg_tdtpe001p_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct pluto *pluto = frontend_to_pluto(fe);
	struct i2c_msg msg;
	int ret;
	u8 buf[4];
	u32 div;

	// Fref = 166.667 kHz (166667 Hz)
	// Fref * 3 = 500 kHz (500000 Hz)
	// IF = 36166667 Hz
	// IF / Fref = 217
	//div = divide(p->frequency + 36166667, 166667);
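	// Worked example (hypothetical input): p->frequency = 482000000 Hz
	// gives div = 482000000 * 3 / 500000 + 217 = 2892 + 217 = 3109.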
	div = divide(p->frequency * 3, 500000) + 217;
	buf[0] = (div >> 8) & 0x7f;
	buf[1] = (div >> 0) & 0xff;

	if (p->frequency < 611000000)
		buf[2] = 0xb4;
	else if (p->frequency < 811000000)
		buf[2] = 0xbc;
	else
		buf[2] = 0xf4;

	// VHF: 174-230 MHz
	// center: 350 MHz
	// UHF: 470-862 MHz
	if (p->frequency < 350000000)
		buf[3] = 0x02;
	else
		buf[3] = 0x04;

	if (p->bandwidth_hz == 8000000)
		buf[3] |= 0x08;

	msg.addr = I2C_ADDR_TUA6034 >> 1;
	msg.flags = 0;
	msg.buf = buf;
	msg.len = sizeof(buf);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	ret = i2c_transfer(&pluto->i2c_adap, &msg, 1);
	if (ret < 0)
		return ret;
	else if (ret == 0)
		return -EREMOTEIO;

	return 0;
}

static int pluto2_request_firmware(struct dvb_frontend *fe,
				   const struct firmware **fw, char *name)
{
	struct pluto *pluto = frontend_to_pluto(fe);

	return request_firmware(fw, name, &pluto->pdev->dev);
}

static struct tda1004x_config pluto2_fe_config = {
	.demod_address = I2C_ADDR_TDA10046 >> 1,
	.invert = 1,
	.invert_oclk = 0,
	.xtal_freq = TDA10046_XTAL_16M,
	.agc_config = TDA10046_AGC_DEFAULT,
	.if_freq = TDA10046_FREQ_3617,
	.request_firmware = pluto2_request_firmware,
};

static int frontend_init(struct pluto *pluto)
{
	int ret;

	pluto->fe = tda10046_attach(&pluto2_fe_config, &pluto->i2c_adap);
	if (!pluto->fe) {
		dev_err(&pluto->pdev->dev, "could not attach frontend\n");
		return -ENODEV;
	}
	pluto->fe->ops.tuner_ops.set_params = lg_tdtpe001p_tuner_set_params;

	ret = dvb_register_frontend(&pluto->dvb_adapter, pluto->fe);
	if (ret < 0) {
		if (pluto->fe->ops.release)
			pluto->fe->ops.release(pluto->fe);
		return ret;
	}

	return 0;
}

static void pluto_read_rev(struct pluto *pluto)
{
	u32 val = pluto_readreg(pluto, REG_MISC) & MISC_DVR;
	dev_info(&pluto->pdev->dev, "board revision %d.%d\n",
		 (val >> 12) & 0x0f, (val >> 4) & 0xff);
}

static void pluto_read_mac(struct pluto *pluto, u8 *mac)
{
	u32 val = pluto_readreg(pluto, REG_MMAC);
	mac[0] = (val >> 8) & 0xff;
	mac[1] = (val >> 0) & 0xff;

	val = pluto_readreg(pluto, REG_IMAC);
	mac[2] = (val >> 8) & 0xff;
	mac[3] = (val >> 0) & 0xff;

	val = pluto_readreg(pluto, REG_LMAC);
	mac[4] = (val >> 8) & 0xff;
	mac[5] = (val >> 0) & 0xff;

	dev_info(&pluto->pdev->dev, "MAC %pM\n", mac);
}

static int pluto_read_serial(struct pluto *pluto)
{
	struct pci_dev *pdev = pluto->pdev;
	unsigned int i, j;
	u8 __iomem *cis;

	cis = pci_iomap(pdev, 1, 0);
	if (!cis)
		return -EIO;

	dev_info(&pdev->dev, "S/N ");

	for (i = 0xe0; i < 0x100; i += 4) {
		u32 val = readl(&cis[i]);
		for (j = 0; j < 32; j += 8) {
			if ((val & 0xff) == 0xff)
				goto out;
			printk(KERN_CONT "%c", val & 0xff);
			val >>= 8;
		}
	}
out:
	printk(KERN_CONT "\n");
	pci_iounmap(pdev, cis);

	return 0;
}

static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pluto *pluto;
	struct dvb_adapter *dvb_adapter;
	struct dvb_demux *dvbdemux;
	struct dmx_demux *dmx;
	int ret = -ENOMEM;

	pluto = kzalloc(sizeof(struct pluto), GFP_KERNEL);
	if (!pluto)
		goto out;

	pluto->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err_kfree;

	/* enable interrupts */
	pci_write_config_dword(pdev, 0x6c, 0x8000);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	pluto->io_mem = pci_iomap(pdev, 0, 0x40);
	if (!pluto->io_mem) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pci_set_drvdata(pdev, pluto);

	ret = request_irq(pdev->irq, pluto_irq, IRQF_SHARED, DRIVER_NAME, pluto);
	if (ret < 0)
		goto err_pci_iounmap;

	ret = pluto_hw_init(pluto);
	if (ret < 0)
		goto err_free_irq;

	/* i2c */
	i2c_set_adapdata(&pluto->i2c_adap, pluto);
	strscpy(pluto->i2c_adap.name, DRIVER_NAME, sizeof(pluto->i2c_adap.name));
	pluto->i2c_adap.owner = THIS_MODULE;
	pluto->i2c_adap.dev.parent = &pdev->dev;
	pluto->i2c_adap.algo_data = &pluto->i2c_bit;
	pluto->i2c_bit.data = pluto;
	pluto->i2c_bit.setsda = pluto_setsda;
	pluto->i2c_bit.setscl = pluto_setscl;
	pluto->i2c_bit.getsda = pluto_getsda;
	pluto->i2c_bit.getscl = pluto_getscl;
	pluto->i2c_bit.udelay = 10;
	pluto->i2c_bit.timeout = 10;

	/* Raise SCL and SDA */
	pluto_setsda(pluto, 1);
	pluto_setscl(pluto, 1);

	ret = i2c_bit_add_bus(&pluto->i2c_adap);
	if (ret < 0)
		goto err_pluto_hw_exit;

	/* dvb */
	ret = dvb_register_adapter(&pluto->dvb_adapter, DRIVER_NAME,
				   THIS_MODULE, &pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_i2c_del_adapter;

	dvb_adapter = &pluto->dvb_adapter;

	pluto_read_rev(pluto);
	pluto_read_serial(pluto);
	pluto_read_mac(pluto, dvb_adapter->proposed_mac);

	dvbdemux = &pluto->demux;
	dvbdemux->filternum = 256;
	dvbdemux->feednum = 256;
	dvbdemux->start_feed = pluto_start_feed;
	dvbdemux->stop_feed = pluto_stop_feed;
	dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
			DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING);
	ret = dvb_dmx_init(dvbdemux);
	if (ret < 0)
		goto err_dvb_unregister_adapter;

	dmx = &dvbdemux->dmx;

	pluto->hw_frontend.source = DMX_FRONTEND_0;
	pluto->mem_frontend.source = DMX_MEMORY_FE;
	pluto->dmxdev.filternum = NHWFILTERS;
	pluto->dmxdev.demux = dmx;

	ret = dvb_dmxdev_init(&pluto->dmxdev, dvb_adapter);
	if (ret < 0)
		goto err_dvb_dmx_release;

	ret = dmx->add_frontend(dmx, &pluto->hw_frontend);
	if (ret < 0)
		goto err_dvb_dmxdev_release;

	ret = dmx->add_frontend(dmx, &pluto->mem_frontend);
	if (ret < 0)
		goto err_remove_hw_frontend;

	ret = dmx->connect_frontend(dmx, &pluto->hw_frontend);
	if (ret < 0)
		goto err_remove_mem_frontend;

	ret = frontend_init(pluto);
	if (ret < 0)
		goto err_disconnect_frontend;

	dvb_net_init(dvb_adapter, &pluto->dvbnet, dmx);
out:
	return ret;

err_disconnect_frontend:
	dmx->disconnect_frontend(dmx);
err_remove_mem_frontend:
	dmx->remove_frontend(dmx, &pluto->mem_frontend);
err_remove_hw_frontend:
	dmx->remove_frontend(dmx, &pluto->hw_frontend);
err_dvb_dmxdev_release:
	dvb_dmxdev_release(&pluto->dmxdev);
err_dvb_dmx_release:
	dvb_dmx_release(dvbdemux);
err_dvb_unregister_adapter:
	dvb_unregister_adapter(dvb_adapter);
err_i2c_del_adapter:
	i2c_del_adapter(&pluto->i2c_adap);
err_pluto_hw_exit:
	pluto_hw_exit(pluto);
err_free_irq:
	free_irq(pdev->irq, pluto);
err_pci_iounmap:
	pci_iounmap(pdev, pluto->io_mem);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err_kfree:
	kfree(pluto);
	goto out;
}

static void pluto2_remove(struct pci_dev *pdev)
{
	struct pluto *pluto = pci_get_drvdata(pdev);
	struct dvb_adapter *dvb_adapter = &pluto->dvb_adapter;
	struct dvb_demux *dvbdemux = &pluto->demux;
	struct dmx_demux *dmx = &dvbdemux->dmx;

	dmx->close(dmx);
	dvb_net_release(&pluto->dvbnet);
	if (pluto->fe)
		dvb_unregister_frontend(pluto->fe);

	dmx->disconnect_frontend(dmx);
	dmx->remove_frontend(dmx, &pluto->mem_frontend);
	dmx->remove_frontend(dmx, &pluto->hw_frontend);
	dvb_dmxdev_release(&pluto->dmxdev);
	dvb_dmx_release(dvbdemux);
	dvb_unregister_adapter(dvb_adapter);
	i2c_del_adapter(&pluto->i2c_adap);
	pluto_hw_exit(pluto);
	free_irq(pdev->irq, pluto);
	pci_iounmap(pdev, pluto->io_mem);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(pluto);
}

#ifndef PCI_VENDOR_ID_SCM
#define PCI_VENDOR_ID_SCM	0x0432
#endif
#ifndef PCI_DEVICE_ID_PLUTO2
#define PCI_DEVICE_ID_PLUTO2	0x0001
#endif

static const struct pci_device_id pluto2_id_table[] = {
	{
		.vendor = PCI_VENDOR_ID_SCM,
		.device = PCI_DEVICE_ID_PLUTO2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* empty */
	},
};

MODULE_DEVICE_TABLE(pci, pluto2_id_table);

static struct pci_driver pluto2_driver = {
	.name = DRIVER_NAME,
	.id_table = pluto2_id_table,
	.probe = pluto2_probe,
	.remove = pluto2_remove,
};

module_pci_driver(pluto2_driver);

MODULE_AUTHOR("Andreas Oberritter <obi@linuxtv.org>");
MODULE_DESCRIPTION("Pluto2 driver");
MODULE_LICENSE("GPL");