// SPDX-License-Identifier: GPL-2.0-only
/*
 * NETJet mISDN driver
 *
 * Author       Karsten Keil <keil@isdn4linux.de>
 *
 * Copyright 2009  by Karsten Keil <keil@isdn4linux.de>
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include "isdnhdlc.h"

#define NETJET_REV	"2.0"

enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};

struct tiger_dma {
	size_t		size;
	u32		*start;
	int		idx;
	u32		dmastart;
	u32		dmairq;
	u32		dmaend;
	u32		dmacur;
};

struct tiger_hw;

struct tiger_ch {
	struct bchannel		bch;
	struct tiger_hw		*nj;
	int			idx;
	int			free;
	int			lastrx;
	u16			rxstate;
	u16			txstate;
	struct isdnhdlc_vars	hsend;
	struct isdnhdlc_vars	hrecv;
	u8			*hsbuf;
	u8			*hrbuf;
};

#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

#define LOG_SIZE	64

struct tiger_hw {
	struct list_head	list;
	struct pci_dev		*pdev;
	char			name[MISDN_MAX_IDLEN];
	enum nj_types		typ;
	int			irq;
	u32			irqcnt;
	u32			base;
	size_t			base_s;
	dma_addr_t		dma;
	void			*dma_p;
	spinlock_t		lock;	/* lock HW */
	struct isac_hw		isac;
	struct tiger_dma	send;
	struct tiger_dma	recv;
	struct tiger_ch		bc[2];
	u8			ctrlreg;
	u8			dmactrl;
	u8			auxd;
	u8			last_is0;
	u8			irqmask0;
	char			log[LOG_SIZE];
};

static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;

static void
_set_debug(struct tiger_hw *card)
{
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
}

static int
set_debug(const char *val, const struct kernel_param *kp)
{
	int ret;
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	if (!ret) {
		read_lock(&card_lock);
		list_for_each_entry(card, &Cards, list)
			_set_debug(card);
		read_unlock(&card_lock);
	}
	return ret;
}

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");

static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}

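/*
 * ISAC access: the ISAC is reached through an indirect window. The two low
 * bits of the AUXDATA latch select the upper nibble of the register offset,
 * and the register itself is then read or written at
 * base + NJ_ISAC_OFF + ((offset & 0x0f) << 2), as done by the helpers below.
 */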
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}

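/*
 * DMA sample layout: every u32 slot of the send/recv rings carries one byte
 * per B channel, B1 in bits 0-7 and B2 in bits 8-15. fill_mem(), fill_dma()
 * and read_dma() therefore mask and shift with the channel number
 * (bch.nr & 1 / & 2) so that updating one channel never disturbs the other
 * channel's byte lane.
 */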
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		fill <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}

static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop DMA and interrupts if both channels are unused */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq  %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}

static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;  /* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;  /* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ,  card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}

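/*
 * DMA buffer layout (set up in inittiger() below): a single coherent buffer
 * of NJ_DMA_SIZE bytes is split in the middle, the first half serving as the
 * READ (transmit) ring and the second half as the WRITE (receive) ring. For
 * each ring the start, end and a midpoint IRQ address are programmed via the
 * NJ_DMA_*_START/IRQ/END registers, so the chip can signal an interrupt when
 * it crosses the half or the end of a ring.
 */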
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
					 &card->dma, GFP_ATOMIC);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x  virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);

	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x  virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);

	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}

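/*
 * Receive path: read_dma() extracts this channel's byte from each u32 slot
 * of the receive ring. In transparent mode the bytes go straight into the
 * rx_skb; in HDLC mode they are collected in hrbuf and then passed through
 * isdnhdlc_decode(), which delivers one skb per completed frame.
 */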
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
		bc->bch.dropcnt += cnt;
		return;
	}
	stat = bchannel_get_rxbuf(&bc->bch, cnt);
	/* only transparent mode uses the count here; an HDLC overrun is detected later */
	if (stat == -ENOMEM) {
		pr_warn("%s.B%d: No memory for %d bytes\n",
			card->name, bc->bch.nr, cnt);
		return;
	}
	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
		p = skb_put(bc->bch.rx_skb, cnt);
	else
		p = bc->hrbuf;

	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		recv_Bchannel(&bc->bch, 0, false);
		return;
	}

	pn = bc->hrbuf;
	while (cnt > 0) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) { /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
			if (debug & DEBUG_HW_BFIFO) {
				snprintf(card->log, LOG_SIZE,
					 "B%1d-recv %s %d ", bc->bch.nr,
					 card->name, stat);
				print_hex_dump_bytes(card->log,
						     DUMP_PREFIX_OFFSET, p,
						     stat);
			}
			recv_Bchannel(&bc->bch, 0, false);
			stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
			if (stat < 0) {
				pr_warn("%s.B%d: No memory for %d bytes\n",
					card->name, bc->bch.nr, cnt);
				return;
			}
		} else if (stat == -HDLC_CRC_ERROR) {
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_FRAMING_ERROR) {
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_LENGTH_ERROR) {
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
		}
		pn += i;
		cnt -= i;
	}
}

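/*
 * The receive ring is serviced half by half: recv_tiger() looks at the
 * WRITE DMA status bits to decide which half was just completed and lets
 * read_dma() drain that half for every active B channel.
 */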
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}

/* sync with current DMA address at start or after exception */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simply sync to the next complete free area; this has
	 * the advantage that we always have the maximum time to handle the
	 * TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}

static int bc_next_frame(struct tiger_ch *);

static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8  *p;

	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
				bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		 bc->bch.nr, count);
	bc->free -= count;
	p = bc->hsbuf;
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
}

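/*
 * Transmit path: bc->free counts the u32 slots this channel may still write
 * in the send ring and bc->idx is its write position. fill_dma() takes data
 * from the current tx_skb (HDLC-encoded first when FLG_HDLC is set, or the
 * fill pattern when FLG_TX_EMPTY is set) and merges the bytes into this
 * channel's byte lane of the shared ring.
 */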
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i, fillempty = 0;
	u32 m, v, n = 0;
	u8  *p;

	if (bc->free == 0)
		return;
	if (!bc->bch.tx_skb) {
		if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
			return;
		fillempty = 1;
		count = card->send.size >> 1;
		p = bc->bch.fill;
	} else {
		count = bc->bch.tx_skb->len - bc->bch.tx_idx;
		if (count <= 0)
			return;
		pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
			 card->name, __func__, bc->bch.nr, count, bc->free,
			 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
			 bc->idx, card->send.idx);
		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	}
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		if (!fillempty)
			bc->bch.tx_idx += count;
		bc->free -= count;
	}
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	if (fillempty) {
		n = p[0];
		if (!(bc->bch.nr & 1))
			n <<= 8;
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			v |= n;
			card->send.start[bc->idx++] = v;
		}
	} else {
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			n = p[i];
			v |= (bc->bch.nr & 1) ? n : n << 8;
			card->send.start[bc->idx++] = v;
		}
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}

static int
bc_next_frame(struct tiger_ch *bc)
{
	int ret = 1;

	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
		fill_dma(bc);
	} else {
		dev_kfree_skb(bc->bch.tx_skb);
		if (get_next_bframe(&bc->bch)) {
			fill_dma(bc);
			test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
		} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
			fill_dma(bc);
		} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
			test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
			ret = 0;
		} else {
			ret = 0;
		}
	}
	return ret;
}

static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}

static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;

	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}

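/*
 * Interrupt handler: IRQSTAT1 carries the ISAC interrupt line and IRQSTAT0
 * the DMA status. A pending ISAC interrupt is dispatched first; then the
 * current READ/WRITE DMA addresses are condensed into a small "which half
 * of each ring is free" bitmap (s0val), and any change against last_is0
 * triggers send_tiger() and/or recv_tiger().
 */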
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}

	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in s0val to indicate which half of each ring is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */

	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */

	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* READ (transmit) DMA half changed */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* WRITE (receive) DMA half changed */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}

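/*
 * mISDN B-channel callbacks: nj_l2l1B() handles the L2->L1 primitives
 * (PH_DATA_REQ feeds fill_dma() under the card lock, PH_ACTIVATE_REQ and
 * PH_DEACTIVATE_REQ switch the channel mode) and nj_bctrl() implements
 * CLOSE_CHANNEL / CONTROL_CHANNEL for a B channel.
 */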
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	unsigned long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			fill_dma(bc);
			ret = 0;
		}
		spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}

static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	return mISDN_ctrl_bchannel(&bc->bch, cq);
}

static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		cancel_work_sync(&bch->workq);
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}

static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	int	ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
		break;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
			ret = -EINVAL;
			break;
		}
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		break;
	case MISDN_CTRL_L1_TIMER3:
		ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
		break;
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
	struct bchannel *bch;

	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	return 0;
}

/*
 * device control function
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw		*card = dch->hw;
	struct channel_req	*rq;
	int			err = 0;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			 __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			 card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}

static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;

	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);

	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		card->irq = -1;
		return -EIO;
	}

	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}

static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;

	if (card->base_s) {
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		card->isac.release(&card->isac);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (device_is_registered(&card->isac.dch.dev.dev))
		mISDN_unregister_device(&card->isac.dch.dev);

	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
				  card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}

static int
nj_setup(struct tiger_hw *card)
{
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
		card->base_s = 0;
		return -EIO;
	}
	ASSIGN_FUNC(nj, ISAC, card->isac);
	return 0;
}

static int
setup_instance(struct tiger_hw *card)
{
	int i, err;
	u_long flags;

	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	_set_debug(card);
	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
				   NJ_DMA_RXSIZE >> 1);
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
	}
	err = nj_setup(card);
	if (err)
		goto error;
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
				    card->name);
	if (err)
		goto error;
	err = nj_init_card(card);
	if (!err) {
		nj_cnt++;
		pr_notice("Netjet %d cards installed\n", nj_cnt);
		return 0;
	}
error:
	nj_release(card);
	return err;
}

static int
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	u32 cfg;
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
		return -ENODEV;
	}

	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
		return -ENODEV;
	}

	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
		return -ENODEV;
	}

	card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
	if (!card) {
		pr_info("No kmem for Netjet\n");
		return err;
	}

	card->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	       pci_name(pdev));

	pci_set_master(pdev);

	/* The TJ300 and TJ320 must be distinguished because their IRQ handling
	 * differs. Unfortunately both chips use the same device ID, but the
	 * TJ320 has bit 20 set in the PCI status/command config dword.
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	if (err)
		pci_set_drvdata(pdev, NULL);

	return err;
}

static void nj_remove(struct pci_dev *pdev)
{
	struct tiger_hw *card = pci_get_drvdata(pdev);

	if (card)
		nj_release(card);
	else
		pr_info("%s drvdata already removed\n", __func__);
}

/* We cannot select cards by their PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the known
 * cards that do not work with this driver - see the probe function above. */
static const struct pci_device_id nj_pci_ids[] = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);

static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = nj_remove,
	.id_table = nj_pci_ids,
};

static int __init nj_init(void)
{
	int err;

	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	err = pci_register_driver(&nj_driver);
	return err;
}

static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}

module_init(nj_init);
module_exit(nj_cleanup);
1155