1*4882a593Smuzhiyun /******************************************************************************
2*4882a593Smuzhiyun iphase.c: Device driver for Interphase ATM PCI adapter cards
3*4882a593Smuzhiyun Author: Peter Wang <pwang@iphase.com>
4*4882a593Smuzhiyun Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5*4882a593Smuzhiyun Interphase Corporation <www.iphase.com>
6*4882a593Smuzhiyun Version: 1.0
7*4882a593Smuzhiyun *******************************************************************************
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun This software may be used and distributed according to the terms
10*4882a593Smuzhiyun of the GNU General Public License (GPL), incorporated herein by reference.
11*4882a593Smuzhiyun Drivers based on this skeleton fall under the GPL and must retain
12*4882a593Smuzhiyun the authorship (implicit copyright) notice.
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun This program is distributed in the hope that it will be useful, but
15*4882a593Smuzhiyun WITHOUT ANY WARRANTY; without even the implied warranty of
16*4882a593Smuzhiyun MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17*4882a593Smuzhiyun General Public License for more details.
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20*4882a593Smuzhiyun was originally written by Monalisa Agrawal at UNH. Now this driver
21*4882a593Smuzhiyun       supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22*4882a593Smuzhiyun card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23*4882a593Smuzhiyun in terms of PHY type, the size of control memory and the size of
24*4882a593Smuzhiyun packet memory. The following are the change log and history:
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun Bugfix the Mona's UBR driver.
27*4882a593Smuzhiyun Modify the basic memory allocation and dma logic.
28*4882a593Smuzhiyun Port the driver to the latest kernel from 2.0.46.
29*4882a593Smuzhiyun Complete the ABR logic of the driver, and added the ABR work-
30*4882a593Smuzhiyun       around for the hardware anomalies.
31*4882a593Smuzhiyun Add the CBR support.
32*4882a593Smuzhiyun Add the flow control logic to the driver to allow rate-limit VC.
33*4882a593Smuzhiyun Add 4K VC support to the board with 512K control memory.
34*4882a593Smuzhiyun Add the support of all the variants of the Interphase ATM PCI
35*4882a593Smuzhiyun (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36*4882a593Smuzhiyun (25M UTP25) and x531 (DS3 and E3).
37*4882a593Smuzhiyun Add SMP support.
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun Support and updates available at: ftp://ftp.iphase.com/pub/atm
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun *******************************************************************************/
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun #include <linux/module.h>
44*4882a593Smuzhiyun #include <linux/kernel.h>
45*4882a593Smuzhiyun #include <linux/mm.h>
46*4882a593Smuzhiyun #include <linux/pci.h>
47*4882a593Smuzhiyun #include <linux/errno.h>
48*4882a593Smuzhiyun #include <linux/atm.h>
49*4882a593Smuzhiyun #include <linux/atmdev.h>
50*4882a593Smuzhiyun #include <linux/sonet.h>
51*4882a593Smuzhiyun #include <linux/skbuff.h>
52*4882a593Smuzhiyun #include <linux/time.h>
53*4882a593Smuzhiyun #include <linux/delay.h>
54*4882a593Smuzhiyun #include <linux/uio.h>
55*4882a593Smuzhiyun #include <linux/init.h>
56*4882a593Smuzhiyun #include <linux/interrupt.h>
57*4882a593Smuzhiyun #include <linux/wait.h>
58*4882a593Smuzhiyun #include <linux/slab.h>
59*4882a593Smuzhiyun #include <asm/io.h>
60*4882a593Smuzhiyun #include <linux/atomic.h>
61*4882a593Smuzhiyun #include <linux/uaccess.h>
62*4882a593Smuzhiyun #include <asm/string.h>
63*4882a593Smuzhiyun #include <asm/byteorder.h>
64*4882a593Smuzhiyun #include <linux/vmalloc.h>
65*4882a593Smuzhiyun #include <linux/jiffies.h>
66*4882a593Smuzhiyun #include <linux/nospec.h>
67*4882a593Smuzhiyun #include "iphase.h"
68*4882a593Smuzhiyun #include "suni.h"
69*4882a593Smuzhiyun #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
74*4882a593Smuzhiyun static void desc_dbg(IADEV *iadev);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun static IADEV *ia_dev[8];
77*4882a593Smuzhiyun static struct atm_dev *_ia_dev[8];
78*4882a593Smuzhiyun static int iadev_count;
79*4882a593Smuzhiyun static void ia_led_timer(struct timer_list *unused);
80*4882a593Smuzhiyun static DEFINE_TIMER(ia_timer, ia_led_timer);
81*4882a593Smuzhiyun static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
82*4882a593Smuzhiyun static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
83*4882a593Smuzhiyun static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
84*4882a593Smuzhiyun |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun module_param(IA_TX_BUF, int, 0);
87*4882a593Smuzhiyun module_param(IA_TX_BUF_SZ, int, 0);
88*4882a593Smuzhiyun module_param(IA_RX_BUF, int, 0);
89*4882a593Smuzhiyun module_param(IA_RX_BUF_SZ, int, 0);
90*4882a593Smuzhiyun module_param(IADebugFlag, uint, 0644);
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun MODULE_LICENSE("GPL");
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun /**************************** IA_LIB **********************************/
95*4882a593Smuzhiyun
/* Reset a transmit-return queue to the empty state (no head, no tail). */
static void ia_init_rtn_q(IARTN_Q *que)
{
	que->next = que->tail = NULL;
}
101*4882a593Smuzhiyun
/* Push @data onto the front of the return queue @que. */
static void ia_enque_head_rtn_q(IARTN_Q *que, IARTN_Q *data)
{
	if (que->next == NULL) {
		/* Queue was empty: the new node is both head and tail. */
		data->next = NULL;
		que->next = que->tail = data;
	} else {
		/* Link the new node in front of the current head. */
		data->next = que->next;
		que->next = data;
	}
}
113*4882a593Smuzhiyun
/*
 * Append a copy of @data to the tail of the return queue @que.
 * Returns 1 on success, -ENOMEM if the node allocation fails.
 * Uses GFP_ATOMIC: callers may be in interrupt context.
 */
static int ia_enque_rtn_q(IARTN_Q *que, struct desc_tbl_t data)
{
	IARTN_Q *entry;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL)
		return -ENOMEM;
	entry->data = data;
	entry->next = NULL;
	if (que->next == NULL) {
		que->next = que->tail = entry;
	} else {
		que->tail->next = entry;
		que->tail = entry;
	}
	return 1;
}
128*4882a593Smuzhiyun
/* Pop and return the head node of @que, or NULL if the queue is empty.
 * Ownership of the returned node passes to the caller (who must free it). */
static IARTN_Q *ia_deque_rtn_q(IARTN_Q *que)
{
	IARTN_Q *head = que->next;

	if (head == NULL)
		return NULL;
	if (head == que->tail)
		que->next = que->tail = NULL;	/* last node removed */
	else
		que->next = head->next;
	return head;
}
140*4882a593Smuzhiyun
/*
 * Reap transmit completions from the adapter's Transmit Completion
 * Queue (TCQ).
 *
 * Walks the TCQ from the host's cached write pointer (host_tcq_wr) up
 * to the adapter's current write pointer.  For each completed
 * descriptor it drops the owning VC's outstanding-descriptor count and
 * clears the desc_tbl[] bookkeeping; descriptors belonging to
 * rate-limited VCs (pcr < rate_limit) are additionally queued on
 * tx_return_q so the poll path can finish their skbs.
 *
 * Fix vs. the previous version: the "Fatal err in get_desc" path used
 * `continue`, which skipped the host_tcq_wr advance at the bottom of
 * the loop and therefore spun forever on the same corrupt entry.  The
 * advance now happens on every iteration.
 */
static void ia_hack_tcq(IADEV *dev)
{
	u_short desc1;
	u_short tcq_wr;
	struct ia_vcc *iavcc_r = NULL;

	tcq_wr = readl(dev->seg_reg + TCQ_WR_PTR) & 0xffff;
	while (dev->host_tcq_wr != tcq_wr) {
		desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
		if (desc1 && !dev->desc_tbl[desc1 - 1].timestamp) {
			/* Descriptor already reclaimed (e.g. by the timeout
			 * recovery in get_desc()); just clear the TCQ slot. */
			IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 - 1, jiffies);)
			*(u_short *)(dev->seg_ram + dev->host_tcq_wr) = 0;
		} else if (desc1) {
			iavcc_r = dev->desc_tbl[desc1 - 1].iavcc;
			if (!iavcc_r) {
				/* Inconsistent table entry; report it but
				 * still fall through to advance host_tcq_wr
				 * below so we cannot loop forever here. */
				printk("IA: Fatal err in get_desc\n");
			} else {
				iavcc_r->vc_desc_cnt--;
				dev->desc_tbl[desc1 - 1].timestamp = 0;
				IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
						dev->desc_tbl[desc1 - 1].txskb, desc1);)
				if (iavcc_r->pcr < dev->rate_limit) {
					IA_SKB_STATE(dev->desc_tbl[desc1 - 1].txskb) |= IA_TX_DONE;
					if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 - 1]) < 0)
						printk("ia_hack_tcq: No memory available\n");
				}
				dev->desc_tbl[desc1 - 1].iavcc = NULL;
				dev->desc_tbl[desc1 - 1].txskb = NULL;
			}
		}
		/* Each TCQ entry is 2 bytes; wrap at the end of the ring. */
		dev->host_tcq_wr += 2;
		if (dev->host_tcq_wr > dev->ffL.tcq_ed)
			dev->host_tcq_wr = dev->ffL.tcq_st;
	}
} /* ia_hack_tcq */
177*4882a593Smuzhiyun
/*
 * Claim the next free transmit descriptor number from the TCQ.
 *
 * Side effects: runs ia_hack_tcq() first to reap completions, and (at
 * most once every 50 jiffies, or whenever the TCQ looks empty) scans
 * the descriptor table for entries whose VC timeout (ltimeout) has
 * expired, recycling those descriptors back into the TCQ.
 *
 * Returns 0xFFFF when no descriptor is available; otherwise a 1-based
 * descriptor number whose desc_tbl[] slot has been stamped with the
 * current jiffies.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short desc_num, i;
  struct sk_buff *skb;
  struct ia_vcc *iavcc_r = NULL;
  unsigned long delta;
  /* NOTE(review): `timer` is static, i.e. shared by all adapters; the
     recovery-scan throttle is therefore global, not per-device. */
  static unsigned long timer = 0;
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Return the timed-out descriptor to the TCQ: step the read
              pointer back one 2-byte slot (with wrap) and store the
              1-based descriptor number there. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip empty slots and descriptors that are still in flight
     (timestamp != 0); give up when the read pointer catches up with
     the write pointer. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
235*4882a593Smuzhiyun
/*
 * Watchdog for the ABR transmit scheduler.  Every 5th call for a given
 * ABR VC, probe whether the hardware scheduler is stuck on that VC
 * (same state and no progress in cell slot/fraction between probes)
 * and, if so, take the segmentation engine offline, force the VC back
 * to the idle state, re-insert it into the ABR schedule table and
 * bring the engine back online.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
   u_char       foundLockUp;
   vcstatus_t   *vcstatus;
   u_short      *shd_tbl;
   u_short      tempCellSlot, tempFract;
   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
   u_int  i;

   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
      vcstatus->cnt++;
      foundLockUp = 0;
      if( vcstatus->cnt == 0x05 ) {  /* probe only every 5th call */
         abr_vc += vcc->vci;
         eabr_vc += vcc->vci;
         if( eabr_vc->last_desc ) {  /* VC still has work outstanding */
            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
               /* Wait for 10 Micro sec */
               udelay(10);
               /* Still in the same state with work pending -> stuck. */
               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                  foundLockUp = 1;
            }
            else {
               tempCellSlot = abr_vc->last_cell_slot;
               tempFract    = abr_vc->fraction;
               /* No progress in either cell slot or fraction since the
                  previous probe means the scheduler is not advancing
                  this VC. */
               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
                  foundLockUp = 1;
               dev->testTable[vcc->vci]->lastTime = tempCellSlot;
               dev->testTable[vcc->vci]->fract = tempFract;
            }
         } /* last descriptor */
         vcstatus->cnt = 0;
      } /* vcstatus->cnt */

      if (foundLockUp) {
         IF_ABR(printk("LOCK UP found\n");)
         /* Take the segmentation engine offline before poking the VC. */
         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
         /* Wait for 10 Micro sec */
         udelay(10);
         abr_vc->status &= 0xFFF8;
         abr_vc->status |= 0x0001;  /* state is idle */
         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
         /* Re-insert the VCI into the first free schedule-table slot. */
         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
         if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
         else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
         /* Back online; unmask and ack transmit interrupts. */
         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
         vcstatus->cnt = 0;
      } /* foundLockUp */

   } /* if an ABR VC */

}
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun /*
297*4882a593Smuzhiyun ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
298*4882a593Smuzhiyun **
299*4882a593Smuzhiyun ** +----+----+------------------+-------------------------------+
300*4882a593Smuzhiyun ** | R | NZ | 5-bit exponent | 9-bit mantissa |
301*4882a593Smuzhiyun ** +----+----+------------------+-------------------------------+
302*4882a593Smuzhiyun **
303*4882a593Smuzhiyun ** R = reserved (written as 0)
304*4882a593Smuzhiyun ** NZ = 0 if 0 cells/sec; 1 otherwise
305*4882a593Smuzhiyun **
306*4882a593Smuzhiyun ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
307*4882a593Smuzhiyun */
308*4882a593Smuzhiyun static u16
cellrate_to_float(u32 cr)309*4882a593Smuzhiyun cellrate_to_float(u32 cr)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun #define NZ 0x4000
313*4882a593Smuzhiyun #define M_BITS 9 /* Number of bits in mantissa */
314*4882a593Smuzhiyun #define E_BITS 5 /* Number of bits in exponent */
315*4882a593Smuzhiyun #define M_MASK 0x1ff
316*4882a593Smuzhiyun #define E_MASK 0x1f
317*4882a593Smuzhiyun u16 flot;
318*4882a593Smuzhiyun u32 tmp = cr & 0x00ffffff;
319*4882a593Smuzhiyun int i = 0;
320*4882a593Smuzhiyun if (cr == 0)
321*4882a593Smuzhiyun return 0;
322*4882a593Smuzhiyun while (tmp != 1) {
323*4882a593Smuzhiyun tmp >>= 1;
324*4882a593Smuzhiyun i++;
325*4882a593Smuzhiyun }
326*4882a593Smuzhiyun if (i == M_BITS)
327*4882a593Smuzhiyun flot = NZ | (i << M_BITS) | (cr & M_MASK);
328*4882a593Smuzhiyun else if (i < M_BITS)
329*4882a593Smuzhiyun flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
330*4882a593Smuzhiyun else
331*4882a593Smuzhiyun flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
332*4882a593Smuzhiyun return flot;
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun #if 0
336*4882a593Smuzhiyun /*
337*4882a593Smuzhiyun ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
338*4882a593Smuzhiyun */
339*4882a593Smuzhiyun static u32
340*4882a593Smuzhiyun float_to_cellrate(u16 rate)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun u32 exp, mantissa, cps;
343*4882a593Smuzhiyun if ((rate & NZ) == 0)
344*4882a593Smuzhiyun return 0;
345*4882a593Smuzhiyun exp = (rate >> M_BITS) & E_MASK;
346*4882a593Smuzhiyun mantissa = rate & M_MASK;
347*4882a593Smuzhiyun if (exp == 0)
348*4882a593Smuzhiyun return 1;
349*4882a593Smuzhiyun cps = (1 << M_BITS) | mantissa;
350*4882a593Smuzhiyun if (exp == M_BITS)
351*4882a593Smuzhiyun cps = cps;
352*4882a593Smuzhiyun else if (exp > M_BITS)
353*4882a593Smuzhiyun cps <<= (exp - M_BITS);
354*4882a593Smuzhiyun else
355*4882a593Smuzhiyun cps >>= (M_BITS - exp);
356*4882a593Smuzhiyun return cps;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun #endif
359*4882a593Smuzhiyun
/*
 * Load *srv_p with the driver's default ABR service parameters.
 * The peak cell rate defaults to the adapter's full line rate; the
 * remaining fields are fixed driver defaults.
 */
static void init_abr_vc(IADEV *dev, srv_cls_param_t *srv_p)
{
	srv_p->class_type = ATM_ABR;

	/* Rates */
	srv_p->pcr  = dev->LineRate;	/* peak = line rate */
	srv_p->mcr  = 0;		/* no guaranteed minimum */
	srv_p->icr  = 0x055cb7;		/* initial cell rate */

	/* Burst/timing parameters */
	srv_p->tbe  = 0xffffff;
	srv_p->frtt = 0x3a;

	/* Rate increase/decrease factors and counters */
	srv_p->rif  = 0xf;
	srv_p->rdf  = 0xb;
	srv_p->nrm  = 0x4;
	srv_p->trm  = 0x7;
	srv_p->cdf  = 0x3;
	srv_p->adtf = 50;
}
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun static int
ia_open_abr_vc(IADEV * dev,srv_cls_param_t * srv_p,struct atm_vcc * vcc,u8 flag)376*4882a593Smuzhiyun ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
377*4882a593Smuzhiyun struct atm_vcc *vcc, u8 flag)
378*4882a593Smuzhiyun {
379*4882a593Smuzhiyun f_vc_abr_entry *f_abr_vc;
380*4882a593Smuzhiyun r_vc_abr_entry *r_abr_vc;
381*4882a593Smuzhiyun u32 icr;
382*4882a593Smuzhiyun u8 trm, nrm, crm;
383*4882a593Smuzhiyun u16 adtf, air, *ptr16;
384*4882a593Smuzhiyun f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
385*4882a593Smuzhiyun f_abr_vc += vcc->vci;
386*4882a593Smuzhiyun switch (flag) {
387*4882a593Smuzhiyun case 1: /* FFRED initialization */
388*4882a593Smuzhiyun #if 0 /* sanity check */
389*4882a593Smuzhiyun if (srv_p->pcr == 0)
390*4882a593Smuzhiyun return INVALID_PCR;
391*4882a593Smuzhiyun if (srv_p->pcr > dev->LineRate)
392*4882a593Smuzhiyun srv_p->pcr = dev->LineRate;
393*4882a593Smuzhiyun if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
394*4882a593Smuzhiyun return MCR_UNAVAILABLE;
395*4882a593Smuzhiyun if (srv_p->mcr > srv_p->pcr)
396*4882a593Smuzhiyun return INVALID_MCR;
397*4882a593Smuzhiyun if (!(srv_p->icr))
398*4882a593Smuzhiyun srv_p->icr = srv_p->pcr;
399*4882a593Smuzhiyun if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
400*4882a593Smuzhiyun return INVALID_ICR;
401*4882a593Smuzhiyun if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
402*4882a593Smuzhiyun return INVALID_TBE;
403*4882a593Smuzhiyun if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
404*4882a593Smuzhiyun return INVALID_FRTT;
405*4882a593Smuzhiyun if (srv_p->nrm > MAX_NRM)
406*4882a593Smuzhiyun return INVALID_NRM;
407*4882a593Smuzhiyun if (srv_p->trm > MAX_TRM)
408*4882a593Smuzhiyun return INVALID_TRM;
409*4882a593Smuzhiyun if (srv_p->adtf > MAX_ADTF)
410*4882a593Smuzhiyun return INVALID_ADTF;
411*4882a593Smuzhiyun else if (srv_p->adtf == 0)
412*4882a593Smuzhiyun srv_p->adtf = 1;
413*4882a593Smuzhiyun if (srv_p->cdf > MAX_CDF)
414*4882a593Smuzhiyun return INVALID_CDF;
415*4882a593Smuzhiyun if (srv_p->rif > MAX_RIF)
416*4882a593Smuzhiyun return INVALID_RIF;
417*4882a593Smuzhiyun if (srv_p->rdf > MAX_RDF)
418*4882a593Smuzhiyun return INVALID_RDF;
419*4882a593Smuzhiyun #endif
420*4882a593Smuzhiyun memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
421*4882a593Smuzhiyun f_abr_vc->f_vc_type = ABR;
422*4882a593Smuzhiyun nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
423*4882a593Smuzhiyun /* i.e 2**n = 2 << (n-1) */
424*4882a593Smuzhiyun f_abr_vc->f_nrm = nrm << 8 | nrm;
425*4882a593Smuzhiyun trm = 100000/(2 << (16 - srv_p->trm));
426*4882a593Smuzhiyun if ( trm == 0) trm = 1;
427*4882a593Smuzhiyun f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
428*4882a593Smuzhiyun crm = srv_p->tbe / nrm;
429*4882a593Smuzhiyun if (crm == 0) crm = 1;
430*4882a593Smuzhiyun f_abr_vc->f_crm = crm & 0xff;
431*4882a593Smuzhiyun f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
432*4882a593Smuzhiyun icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
433*4882a593Smuzhiyun ((srv_p->tbe/srv_p->frtt)*1000000) :
434*4882a593Smuzhiyun (1000000/(srv_p->frtt/srv_p->tbe)));
435*4882a593Smuzhiyun f_abr_vc->f_icr = cellrate_to_float(icr);
436*4882a593Smuzhiyun adtf = (10000 * srv_p->adtf)/8192;
437*4882a593Smuzhiyun if (adtf == 0) adtf = 1;
438*4882a593Smuzhiyun f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
439*4882a593Smuzhiyun f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
440*4882a593Smuzhiyun f_abr_vc->f_acr = f_abr_vc->f_icr;
441*4882a593Smuzhiyun f_abr_vc->f_status = 0x0042;
442*4882a593Smuzhiyun break;
443*4882a593Smuzhiyun case 0: /* RFRED initialization */
444*4882a593Smuzhiyun ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
445*4882a593Smuzhiyun *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
446*4882a593Smuzhiyun r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
447*4882a593Smuzhiyun r_abr_vc += vcc->vci;
448*4882a593Smuzhiyun r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
449*4882a593Smuzhiyun air = srv_p->pcr << (15 - srv_p->rif);
450*4882a593Smuzhiyun if (air == 0) air = 1;
451*4882a593Smuzhiyun r_abr_vc->r_air = cellrate_to_float(air);
452*4882a593Smuzhiyun dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
453*4882a593Smuzhiyun dev->sum_mcr += srv_p->mcr;
454*4882a593Smuzhiyun dev->n_abr++;
455*4882a593Smuzhiyun break;
456*4882a593Smuzhiyun default:
457*4882a593Smuzhiyun break;
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun return 0;
460*4882a593Smuzhiyun }
/*
 * Reserve CBR schedule-table entries for a new CBR connection.
 *
 * Converts the requested PCR into a number of schedule-table entries
 * (rounded against dev->Granularity), then spreads that many copies of
 * the VCI as evenly as possible through the CBR schedule table,
 * searching outward (testSlot = ideal -/+ inc) from each ideal slot
 * until a free (zero) slot is found.  Enables CBR in STPARMS when this
 * is the first enabled CBR VC.
 *
 * Returns 0 on success, -1 if no PCR was given, -EBUSY if the table
 * has insufficient free entries.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16   cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* Round up when the requested rate is closer to the next multiple
      (weighted 3:1 toward rounding up). */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          /* Probe below (ideal - inc), then above (ideal + inc), with
             wraparound at both ends of the table. */
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
ia_cbrVc_close(struct atm_vcc * vcc)576*4882a593Smuzhiyun static void ia_cbrVc_close (struct atm_vcc *vcc) {
577*4882a593Smuzhiyun IADEV *iadev;
578*4882a593Smuzhiyun u16 *SchedTbl, NullVci = 0;
579*4882a593Smuzhiyun u32 i, NumFound;
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun iadev = INPH_IA_DEV(vcc->dev);
582*4882a593Smuzhiyun iadev->NumEnabledCBR--;
583*4882a593Smuzhiyun SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
584*4882a593Smuzhiyun if (iadev->NumEnabledCBR == 0) {
585*4882a593Smuzhiyun writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
586*4882a593Smuzhiyun IF_CBR (printk("CBR support disabled\n");)
587*4882a593Smuzhiyun }
588*4882a593Smuzhiyun NumFound = 0;
589*4882a593Smuzhiyun for (i=0; i < iadev->CbrTotEntries; i++)
590*4882a593Smuzhiyun {
591*4882a593Smuzhiyun if (*SchedTbl == vcc->vci) {
592*4882a593Smuzhiyun iadev->CbrRemEntries++;
593*4882a593Smuzhiyun *SchedTbl = NullVci;
594*4882a593Smuzhiyun IF_CBR(NumFound++;)
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun SchedTbl++;
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun
/*
 * Count free transmit descriptors: reap completions first, then
 * compute the distance (in 2-byte entries) between the TCQ read
 * pointer and the host write pointer, accounting for ring wraparound.
 */
static int ia_avail_descs(IADEV *iadev)
{
	int avail;

	ia_hack_tcq(iadev);
	if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd) {
		/* No wrap: plain distance. */
		avail = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
	} else {
		/* Write pointer has wrapped past the end of the ring. */
		avail = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 +
			 iadev->host_tcq_wr - iadev->ffL.tcq_st) / 2;
	}
	return avail;
}
611*4882a593Smuzhiyun
612*4882a593Smuzhiyun static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
613*4882a593Smuzhiyun
/*
 * Drain the software transmit backlog: while hardware descriptors are
 * available, dequeue backlogged skbs and hand them to ia_pkt_tx().
 * Skbs whose VC has vanished or is no longer READY are dropped; an skb
 * the hardware refuses is put back at the head of the backlog for a
 * later retry.  Always returns 0.
 */
static int ia_que_tx (IADEV *iadev) {
   struct sk_buff *skb;
   int num_desc;
   struct atm_vcc *vcc;
   num_desc = ia_avail_descs(iadev);

   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
         break;
      }
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         /* VC was closed while the skb sat in the backlog. */
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
         break;
      }
      if (ia_pkt_tx (vcc, skb)) {
         /* Hardware refused the packet; requeue at the head so
            ordering is preserved for the next attempt. */
         skb_queue_head(&iadev->tx_backlog, skb);
      }
      num_desc--;
   }
   return 0;
}
638*4882a593Smuzhiyun
/*
 * ia_tx_poll - reap completed transmissions.
 *
 * Pulls completion records off tx_return_q (filled from the TCQ by
 * ia_hack_tcq()).  For each completed skb, everything queued ahead of
 * it on the VC's txing_skb list is released first (their completions
 * were evidently missed), then the skb itself is handed back through
 * vcc->pop() or freed.  Finally the software backlog gets a chance to
 * refill the now-free descriptors via ia_que_tx().
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q * rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           /* NOTE(review): the early 'goto out' paths skip the
              ia_que_tx() refill below - presumably deliberate for
              these error states; confirm */
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       /* release every skb queued ahead of the one just completed */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* completed skb never found on the txing list: requeue the
             completion record for the next poll and stop */
          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
   }
   ia_que_tx(iadev);
out:
   return;
}
#if 0
/*
 * ia_eeprom_put - write one 16-bit word to the serial EEPROM (NOVRAM).
 * @iadev: adapter being accessed
 * @addr:  word address within the EEPROM
 * @val:   16-bit value to store
 *
 * Compiled out (#if 0) - kept for reference.  Bit-bangs a write-enable
 * command, the write command + address, then the 16 data bits MSB
 * first, waits for the part to signal completion on NVDO, and finally
 * disables writes again.
 * NOTE(review): the NVRAM_CMD(EXTEND + EWDS) line below has no trailing
 * semicolon - would need fixing if this code is ever re-enabled
 * (unless the macro supplies its own); confirm before use.
 */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32 t;
	int i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
736*4882a593Smuzhiyun
/*
 * ia_eeprom_get - read one 16-bit word from the serial EEPROM (NOVRAM).
 * @iadev: adapter being accessed
 * @addr:  word address within the EEPROM
 *
 * Bit-bangs a read command via the NVRAM_* helper macros, then clocks
 * in the 16 data bits MSB first and assembles them into the return
 * value.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
        u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of the
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
760*4882a593Smuzhiyun
/*
 * ia_hw_type - derive the adapter configuration from the EEPROM.
 *
 * Reads the hardware-type word (EEPROM word 25) and from it sets the
 * TX/RX buffer counts and sizes, the start of receive packet RAM, the
 * front-end PHY type and the nominal line rate in cells/s.  Buffer
 * counts are scaled down on smaller packet memories, but only when they
 * are still at their compile-time defaults (a user override is kept
 * as-is).
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   int scale;

   iadev->memType = memType;

   /* 1M memory keeps the full buffer complement, 512K takes half,
      anything smaller an eighth */
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
      scale = 1;
   else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
      scale = 2;
   else
      scale = 8;

   if (scale > 1 && IA_TX_BUF == DFL_TX_BUFFERS)
      iadev->num_tx_desc = IA_TX_BUF / scale;
   else
      iadev->num_tx_desc = IA_TX_BUF;
   iadev->tx_buf_sz = IA_TX_BUF_SZ;

   if (scale > 1 && IA_RX_BUF == DFL_RX_BUFFERS)
      iadev->num_rx_desc = IA_RX_BUF / scale;
   else
      iadev->num_rx_desc = IA_RX_BUF;
   iadev->rx_buf_sz = IA_RX_BUF_SZ;

   /* receive packet RAM begins right after the transmit buffers */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                           memType,iadev->phy_type);)
   /* line rate in cells/s; 26/27 accounts for PLCP/framing overhead -
      TODO confirm the exact framing factor against the PHY datasheets */
   if (iadev->phy_type == FE_25MBIT_PHY)
       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
}
821*4882a593Smuzhiyun
/* Read a 32-bit PHY register; @reg is a byte offset, the mapping is
   indexed in 32-bit words, hence the >> 2. */
static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
{
	return readl(ia->phy + (reg >> 2));
}
826*4882a593Smuzhiyun
/* Write a 32-bit PHY register; @reg is a byte offset, the mapping is
   indexed in 32-bit words, hence the >> 2. */
static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
{
	writel(val, ia->phy + (reg >> 2));
}
831*4882a593Smuzhiyun
/*
 * ia_frontend_intr - service a front-end (PHY) interrupt.
 *
 * Refreshes iadev->carrier_detect from the PHY-specific status
 * register.  For the DS3/E3 SUNI-PDH parts the latched interrupt
 * indication register is read first with the result discarded -
 * presumably clear-on-read to acknowledge the interrupt; TODO confirm
 * against the PM7345 datasheet.
 */
static void ia_frontend_intr(struct iadev_priv *iadev)
{
	u32 status;

	if (iadev->phy_type & FE_25MBIT_PHY) {
		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
	} else if (iadev->phy_type & FE_DS3_PHY) {
		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
	} else if (iadev->phy_type & FE_E3_PHY) {
		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	} else {
		/* OC3: loss-of-signal bit in the RSOP status register */
		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
	}

	printk(KERN_INFO "IA: SUNI carrier %s\n",
		iadev->carrier_detect ? "detected" : "lost signal");
}
855*4882a593Smuzhiyun
/*
 * ia_mb25_init - bring up the 25.6 Mbit/s (MB25) front end.
 *
 * Programs the master control register with MB25_MC_DRIC | MB25_MC_DREC
 * (per the commented-out original, without MB25_MC_ENABLED), clears the
 * diagnostic register, and samples the initial carrier state from the
 * GSB bit of the interrupt status register.
 */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
867*4882a593Smuzhiyun
/* One PHY initialization step: write @val to register offset @reg.
   Tables of these are replayed by ia_phy_write(). */
struct ia_reg {
	u16 reg;	/* PHY register byte offset */
	u16 val;	/* value to program */
};
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun static void ia_phy_write(struct iadev_priv *iadev,
874*4882a593Smuzhiyun const struct ia_reg *regs, int len)
875*4882a593Smuzhiyun {
876*4882a593Smuzhiyun while (len--) {
877*4882a593Smuzhiyun ia_phy_write32(iadev, regs->reg, regs->val);
878*4882a593Smuzhiyun regs++;
879*4882a593Smuzhiyun }
880*4882a593Smuzhiyun }
881*4882a593Smuzhiyun
/*
 * ia_suni_pm7345_init_ds3 - DS3-specific setup of the PM7345 SUNI-PDH.
 *
 * Samples initial carrier state from the framer status (LOSV bit), then
 * programs the DS3 framer/transmitter configuration table.  Register
 * values are magic constants from the original driver - see the PM7345
 * datasheet for bit meanings.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
899*4882a593Smuzhiyun
/*
 * ia_suni_pm7345_init_e3 - E3-specific setup of the PM7345 SUNI-PDH.
 *
 * Samples initial carrier state from the framer interrupt/status
 * register (LOS bit), then programs the E3 framer/transmitter
 * configuration table, enabling E3 mode in SUNI_CONFIG.  Register
 * values are magic constants from the original driver - see the PM7345
 * datasheet for bit meanings.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
919*4882a593Smuzhiyun
/*
 * ia_suni_pm7345_init - common initialization of the PM7345 SUNI-PDH.
 *
 * Dispatches to the DS3 or E3 specific setup first, then programs the
 * shared receive/transmit cell-processor configuration (idle and cell
 * delineation patterns/masks), and finally clears every loopback bit in
 * SUNI_CONFIG with a read-modify-write.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init[] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL,		0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET,		0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST,		0 },

		{ SUNI_RXCP_CTRL,		0x2c },
		{ SUNI_RXCP_FCTRL,		0x81 },

		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },

		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },

		{ SUNI_RXCP_CELL_PAT_H1,	0 },
		{ SUNI_RXCP_CELL_PAT_H2,	0 },
		{ SUNI_RXCP_CELL_PAT_H3,	0 },
		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },

		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
		{ SUNI_RXCP_CELL_MASK_H4,	0xff },

		{ SUNI_TXCP_CTRL,		0xa4 },
		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
	};

	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* take the part out of line/cell/diagnostic/PHY loopback modes */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
	return;
}
973*4882a593Smuzhiyun
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun /***************************** IA_LIB END *****************************/
976*4882a593Smuzhiyun
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
/*
 * xdump - hex + ASCII dump of a buffer to the kernel log (debug only).
 * @cp:     bytes to dump
 * @length: number of bytes
 * @prefix: string printed at the start of every output line
 *
 * Emits 16 bytes per line: hex columns (extra blank every 4 bytes),
 * then a printable-ASCII rendition with '.' for non-printables.
 *
 * Fix: in the padding loop for a short final line, the group-separator
 * sprintf discarded its return value, so pBuf was not advanced and the
 * separator was overwritten by the next write, misaligning the ASCII
 * column.  The return value is now accumulated like everywhere else.
 */
static void xdump( u_char* cp, int length, char* prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char* pBuf = prntBuf;
    count = 0;
    while (count < length) {
        pBuf += sprintf( pBuf, "%s", prefix );
        /* hex columns, extra blank between each group of four */
        for (col = 0; count + col < length && col < 16; col++) {
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while (col++ < 16) {        /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( pBuf, " " );   /* was: return value dropped */
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        /* ASCII rendition of the same 16 bytes */
        for (col = 0; count + col < length && col < 16; col++) {
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( pBuf, "." );
        }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

}  /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
1011*4882a593Smuzhiyun
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun static struct atm_dev *ia_boards = NULL;
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun #define ACTUAL_RAM_BASE \
1016*4882a593Smuzhiyun RAM_BASE*((iadev->mem)/(128 * 1024))
1017*4882a593Smuzhiyun #define ACTUAL_SEG_RAM_BASE \
1018*4882a593Smuzhiyun IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1019*4882a593Smuzhiyun #define ACTUAL_REASS_RAM_BASE \
1020*4882a593Smuzhiyun IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun
1023*4882a593Smuzhiyun /*-- some utilities and memory allocation stuff will come here -------------*/
1024*4882a593Smuzhiyun
/*
 * desc_dbg - dump transmit-completion-queue state (debug aid).
 *
 * Prints the hardware TCQ write pointer with the descriptors at and
 * just before it, the host-side pointers, every slot between the TCQ
 * start and end registers, and the timestamp table of all TX
 * descriptors.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* walk every slot between the start and end pointers */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun /*----------------------------- Receiving side stuff --------------------------*/
1052*4882a593Smuzhiyun
/*
 * rx_excp_rcvd - drain the reassembly exception queue.
 *
 * The whole body is compiled out: as the original comment notes,
 * closing the receive side causes an exception-interrupt storm, so the
 * queue is deliberately left unserviced and this function is a no-op.
 * The dead code below shows how the exception queue would be walked
 * (VCI + error code per 4-byte entry, with wrap at EXCP_Q_ED_ADR).
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1081*4882a593Smuzhiyun
/*
 * free_desc - return a receive buffer descriptor to the hardware.
 *
 * Writes @desc into the on-card free-descriptor queue, advances the
 * write pointer (two bytes per slot, wrapping from fdq_ed back to
 * fdq_st), then publishes the new pointer to the reassembly controller.
 * Ordering matters: the descriptor must land in RAM before the pointer
 * update makes it visible to the hardware.
 */
static void free_desc(struct atm_dev *dev, int desc)
{
	IADEV *iadev;
	iadev = INPH_IA_DEV(dev);
        writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
	iadev->rfL.fdq_wr +=2;
	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun
/*
 * rx_pkt - take one completed PDU off the packet-complete queue (PCQ)
 * and schedule its DMA from card memory into a freshly allocated skb.
 *
 * Returns 0 on success (and on handled drops), -EINVAL when the PCQ is
 * empty, -1 for a bad descriptor or missing VCC.  Every drop path
 * returns the buffer descriptor to the hardware free queue via
 * free_desc().
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* read pointer caught up with the hardware write pointer: empty */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
   	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
 		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
        /* range-check desc before dereferencing the descriptor entry
           (short-circuit || guarantees vc_index is only read when desc
           is in range) */
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
                free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
                atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	/* PDU length = distance the DMA pointer advanced past buffer start */
	len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
        }

	if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
	   goto out_free_desc;
        }
	skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1208*4882a593Smuzhiyun
/*
 * rx_intr - service a reassembly-side interrupt.
 *
 * Handles, in order: packets received (drains the packet-complete queue
 * through rx_pkt()), free-descriptor-queue exhaustion (with a recovery
 * hack that force-frees every descriptor when reception has stalled for
 * ~50 jiffies), exception cells, and raw cells (currently just logged).
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* first occurrence: snapshot packet count and time so a stall can
        be detected on the next empty-queue interrupt */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     /* stalled >50 jiffies with no packets received: force-recycle all
        descriptors and unmask the queue interrupts to restart RX */
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
  }
}
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun
/*
 * rx_dle_intr - reap completed receive DMA list entries (DLEs).
 *
 * Walks the RX DLE ring from our cached read pointer up to the adapter's
 * current list pointer.  For every finished DMA it returns the hardware
 * buffer descriptor to the free pool, validates the AAL5 CPCS trailer,
 * trims the skb to the real PDU length and hands it to the VCC's push
 * handler.  Finally, if receiving had been suspended because the free
 * descriptor queue ran empty, re-enables RX interrupts now that
 * descriptors have been returned.  Called from interrupt context.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
     - do we really need to do this. Think not. */
  /* DMA is done, just get all the receive buffers from the rx dma queue
     and push them up to the higher layer protocol. Also free the desc
     associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  /* The adapter's list pointer, masked to an offset within the ring,
     tells us how far hardware DMA has progressed. */
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
          dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

          /* NOTE(review): the unmap uses the write-side DLE's
             sys_pkt_addr, not the entry being reaped here — presumably
             the write pointer tracks the buffer just completed; confirm
             against the RX DLE queue management before changing. */
          dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
                           len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
          if (!vcc) {
              printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             /* VCC is being torn down: count the error and return the
                buffer quota before dropping the packet. */
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          // get real pkt length pwang_test
          /* The AAL5 CPCS trailer sits at the very end of the DMAed data
             and carries the true PDU length (byte-swapped on the wire). */
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
          length = swap_byte_order(trailer->length);
          /* Reject PDUs whose trailer length is impossible for this
             buffer/skb - corrupt reassembly. */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          /* Drop the cell padding and trailer: keep only the payload. */
          skb_trim(skb, length);

          /* Display the packet */
          IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

          IF_RX(printk("rx_dle_intr: skb push");)
          vcc->push(vcc,skb);
          atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      /* Advance around the circular DLE ring. */
      if (++dle == iadev->rx_dle_q.end)
           dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
     unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun static int open_rx(struct atm_vcc *vcc)
1375*4882a593Smuzhiyun {
1376*4882a593Smuzhiyun IADEV *iadev;
1377*4882a593Smuzhiyun u_short __iomem *vc_table;
1378*4882a593Smuzhiyun u_short __iomem *reass_ptr;
1379*4882a593Smuzhiyun IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1382*4882a593Smuzhiyun iadev = INPH_IA_DEV(vcc->dev);
1383*4882a593Smuzhiyun if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1384*4882a593Smuzhiyun if (iadev->phy_type & FE_25MBIT_PHY) {
1385*4882a593Smuzhiyun printk("IA: ABR not support\n");
1386*4882a593Smuzhiyun return -EINVAL;
1387*4882a593Smuzhiyun }
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun /* Make only this VCI in the vc table valid and let all
1390*4882a593Smuzhiyun others be invalid entries */
1391*4882a593Smuzhiyun vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1392*4882a593Smuzhiyun vc_table += vcc->vci;
1393*4882a593Smuzhiyun /* mask the last 6 bits and OR it with 3 for 1K VCs */
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun *vc_table = vcc->vci << 6;
1396*4882a593Smuzhiyun /* Also keep a list of open rx vcs so that we can attach them with
1397*4882a593Smuzhiyun incoming PDUs later. */
1398*4882a593Smuzhiyun if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1399*4882a593Smuzhiyun (vcc->qos.txtp.traffic_class == ATM_ABR))
1400*4882a593Smuzhiyun {
1401*4882a593Smuzhiyun srv_cls_param_t srv_p;
1402*4882a593Smuzhiyun init_abr_vc(iadev, &srv_p);
1403*4882a593Smuzhiyun ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun else { /* for UBR later may need to add CBR logic */
1406*4882a593Smuzhiyun reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1407*4882a593Smuzhiyun reass_ptr += vcc->vci;
1408*4882a593Smuzhiyun *reass_ptr = NO_AAL5_PKT;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun if (iadev->rx_open[vcc->vci])
1412*4882a593Smuzhiyun printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1413*4882a593Smuzhiyun vcc->dev->number, vcc->vci);
1414*4882a593Smuzhiyun iadev->rx_open[vcc->vci] = vcc;
1415*4882a593Smuzhiyun return 0;
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun
/*
 * rx_init - one-time initialization of the receive (reassembly) side.
 *
 * Allocates the coherent RX DLE ring, resets the reassembler, lays out
 * and initializes all receive control-memory structures (buffer
 * descriptors, free-buffer queue, packet-complete queue, exception
 * queue, reassembly table, VC lookup table, ABR VC table), programs the
 * timeout/filter registers, allocates the rx_open[] VCC array and
 * finally puts the reassembler online.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rx_init(struct atm_dev *dev)
{
  IADEV *iadev;
  struct rx_buf_desc __iomem *buf_desc_ptr;
  unsigned long rx_pkt_start = 0;
  void *dle_addr;
  struct abr_vc_table *abr_vc_table;
  u16 *vc_table;
  u16 *reass_table;
  int i,j, vcsize_sel;
  u_short freeq_st_adr;
  u_short *freeq_start;

  iadev = INPH_IA_DEV(dev);
//  spin_lock_init(&iadev->rx_lock);

  /* Allocate 4k bytes - more aligned than needed (4k boundary) */
  dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
                                &iadev->rx_dle_dma, GFP_KERNEL);
  if (!dle_addr)  {
      printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
      goto err_out;
  }
  iadev->rx_dle_q.start = (struct dle *)dle_addr;
  iadev->rx_dle_q.read = iadev->rx_dle_q.start;
  iadev->rx_dle_q.write = iadev->rx_dle_q.start;
  iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
  /* the end of the dle q points to the entry after the last
     DLE that can be used. */

  /* write the upper 20 bits of the start address to rx list address register */
  /* We know this is 32bit bus addressed so the following is safe */
  writel(iadev->rx_dle_dma & 0xfffff000,
         iadev->dma + IPHASE5575_RX_LIST_ADDR);
  IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
  printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

  /* Mask all reassembly interrupts, stop the reassembler, then reset it
     before laying out its control memory. */
  writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
  writew(0, iadev->reass_reg+MODE_REG);
  writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

  /* Receive side control memory map
     -------------------------------

     Buffer descr	0x0000 (736 - 23K)
     VP Table		0x5c00 (256 - 512)
     Except q		0x5e00 (128 - 512)
     Free buffer q	0x6000 (1K - 2K)
     Packet comp q	0x6800 (1K - 2K)
     Reass Table	0x7000 (1K - 2K)
     VC Table		0x7800 (1K - 2K)
     ABR VC Table	0x8000 (1K - 32K)
  */

  /* Base address for Buffer Descriptor Table */
  writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
  /* Set the buffer size register */
  writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

  /* Initialize each entry in the Buffer Descriptor Table */
  iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
  buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
  /* Descriptor 0 is reserved/unused; real descriptors start at 1. */
  memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
  buf_desc_ptr++;
  rx_pkt_start = iadev->rx_pkt_ram;
  for(i=1; i<=iadev->num_rx_desc; i++)
  {
      /* Each descriptor points at one rx_buf_sz-sized slot in packet
         memory, split into high/low 16-bit halves. */
      memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
      buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
      buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
      buf_desc_ptr++;
      rx_pkt_start += iadev->rx_buf_sz;
  }
  IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
  /* Free buffer queue: empty-queue window sized for num_rx_desc
     16-bit entries. */
  i = FREE_BUF_DESC_Q*iadev->memSize;
  writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
  writew(i, iadev->reass_reg+FREEQ_ST_ADR);
  writew(i+iadev->num_rx_desc*sizeof(u_short),
                                         iadev->reass_reg+FREEQ_ED_ADR);
  writew(i, iadev->reass_reg+FREEQ_RD_PTR);
  writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_WR_PTR);
  /* Fill the FREEQ with all the free descriptors. */
  freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
  freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
  for(i=1; i<=iadev->num_rx_desc; i++)
  {
      *freeq_start = (u_short)i;
      freeq_start++;
  }
  IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
  /* Packet Complete Queue */
  i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
  writew(i, iadev->reass_reg+PCQ_ST_ADR);
  writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
  writew(i, iadev->reass_reg+PCQ_RD_PTR);
  writew(i, iadev->reass_reg+PCQ_WR_PTR);

  /* Exception Queue */
  i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
  writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
  writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
                                             iadev->reass_reg+EXCP_Q_ED_ADR);
  writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
  writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

  /* Load local copy of FREEQ and PCQ ptrs */
  iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
  iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
  iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
  iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
  iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
  iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
  iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
  iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

  IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
        iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
        iadev->rfL.pcq_wr);)
  /* just for check - no VP TBL */
  /* VP Table */
  /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
  /* initialize VP Table for invalid VPIs
     - I guess we can write all 1s or 0x000f in the entire memory
       space or something similar.
  */

  /* This seems to work and looks right to me too !!! */
  i =  REASS_TABLE * iadev->memSize;
  writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
  /* Every reassembly-table entry starts in the "no AAL5 packet in
     progress" state. */
  reass_table = (u16 *)(iadev->reass_ram+i);
  j = REASS_TABLE_SZ * iadev->memSize;
  for(i=0; i < j; i++)
      *reass_table++ = NO_AAL5_PKT;
  /* Derive the VC-table size selector: vcsize_sel counts how many times
     8K must be halved to reach num_vc (assumes num_vc is a power of two
     <= 8K). */
  i = 8*1024;
  vcsize_sel =  0;
  while (i != iadev->num_vc) {
      i /= 2;
      vcsize_sel++;
  }
  i = RX_VC_TABLE * iadev->memSize;
  writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
  vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
  j = RX_VC_TABLE_SZ * iadev->memSize;
  for(i = 0; i < j; i++)
  {
      /* shift the reassembly pointer by 3 + lower 3 bits of
	 vc_lkup_base register (=3 for 1K VCs) and the last byte
	 is those low 3 bits.
	 Shall program this later.
      */
      *vc_table = (i << 6) | 15;	/* for invalid VCI */
      vc_table++;
  }
  /* ABR VC table */
  i =  ABR_VC_TABLE * iadev->memSize;
  writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

  i = ABR_VC_TABLE * iadev->memSize;
  abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
  /* NOTE(review): the entry count below reuses REASS_TABLE_SZ rather
     than an ABR-specific size constant - presumably intentional sizing;
     confirm against the control memory map above. */
  j = REASS_TABLE_SZ * iadev->memSize;
  memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
  for(i = 0; i < j; i++) {
      /* Default rate-decrease factor and additive-increase rate for
         each ABR VC entry. */
      abr_vc_table->rdf = 0x0003;
      abr_vc_table->air = 0x5eb1;
      abr_vc_table++;
  }

  /* Initialize other registers */

  /* VP Filter Register set for VC Reassembly only */
  writew(0xff00, iadev->reass_reg+VP_FILTER);
  writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
  writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

  /* Packet Timeout Count  related Registers :
     Set packet timeout to occur in about 3 seconds
     Set Packet Aging Interval count register to overflow in about 4 us
   */
  writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

  /* NOTE(review): j here is left over from the ABR table sizing above -
     confirm the TMOUT_RANGE encoding really intends that value. */
  i = (j >> 6) & 0xFF;
  j += 2 * (j - 1);
  i |= ((j << 2) & 0xFF00);
  writew(i, iadev->reass_reg+TMOUT_RANGE);

  /* initiate the desc_tble */
  for(i=0; i<iadev->num_tx_desc;i++)
      iadev->desc_tbl[i].timestamp = 0;

  /* to clear the interrupt status register - read it */
  readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

  /* Mask Register - clear it */
  writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

  skb_queue_head_init(&iadev->rx_dma_q);
  iadev->rx_free_desc_qhead = NULL;

  iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
  if (!iadev->rx_open) {
      printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
      dev->number);
      goto err_free_dle;
  }

  /* Go online: unmasked interrupts may fire from here on. */
  iadev->rxing = 1;
  iadev->rx_pkt_cnt = 0;
  /* Mode Register */
  writew(R_ONLINE, iadev->reass_reg+MODE_REG);
  return 0;

err_free_dle:
  dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
		    iadev->rx_dle_dma);
err_out:
  return -ENOMEM;
}
1641*4882a593Smuzhiyun
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun /*
1644*4882a593Smuzhiyun The memory map suggested in appendix A and the coding for it.
1645*4882a593Smuzhiyun Keeping it around just in case we change our mind later.
1646*4882a593Smuzhiyun
1647*4882a593Smuzhiyun Buffer descr 0x0000 (128 - 4K)
1648*4882a593Smuzhiyun UBR sched 0x1000 (1K - 4K)
1649*4882a593Smuzhiyun UBR Wait q 0x2000 (1K - 4K)
   Commn queues	0x3000	Packet Ready, Transmit comp(0x3100)
1651*4882a593Smuzhiyun (128 - 256) each
1652*4882a593Smuzhiyun extended VC 0x4000 (1K - 8K)
1653*4882a593Smuzhiyun ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1654*4882a593Smuzhiyun CBR sched 0x7000 (as needed)
1655*4882a593Smuzhiyun VC table 0x8000 (1K - 32K)
1656*4882a593Smuzhiyun */
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun static void tx_intr(struct atm_dev *dev)
1659*4882a593Smuzhiyun {
1660*4882a593Smuzhiyun IADEV *iadev;
1661*4882a593Smuzhiyun unsigned short status;
1662*4882a593Smuzhiyun unsigned long flags;
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun iadev = INPH_IA_DEV(dev);
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1667*4882a593Smuzhiyun if (status & TRANSMIT_DONE){
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun IF_EVENT(printk("Transmit Done Intr logic run\n");)
1670*4882a593Smuzhiyun spin_lock_irqsave(&iadev->tx_lock, flags);
1671*4882a593Smuzhiyun ia_tx_poll(iadev);
1672*4882a593Smuzhiyun spin_unlock_irqrestore(&iadev->tx_lock, flags);
1673*4882a593Smuzhiyun writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1674*4882a593Smuzhiyun if (iadev->close_pending)
1675*4882a593Smuzhiyun wake_up(&iadev->close_wait);
1676*4882a593Smuzhiyun }
1677*4882a593Smuzhiyun if (status & TCQ_NOT_EMPTY)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun
/*
 * tx_dle_intr - reap completed transmit DMA list entries.
 *
 * Walks the TX DLE ring from our cached read pointer up to the
 * adapter's current list pointer, unmapping the DMAed data and either
 * completing each skb (vcc->pop / free) or, for rate-limited VCs,
 * parking it on the per-VC txing_skb queue for later flow control.
 * Runs with tx_lock held; the early-return error paths drop the lock
 * before freeing the orphan skb.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct dle *dle, *cur_dle;
  struct sk_buff *skb;
  struct atm_vcc *vcc;
  struct ia_vcc  *iavcc;
  u_int dle_lp;
  unsigned long flags;

  iadev = INPH_IA_DEV(dev);
  spin_lock_irqsave(&iadev->tx_lock, flags);
  dle = iadev->tx_dle_q.read;
  /* The adapter's list pointer (masked to a ring offset) marks how far
     hardware DMA has progressed. */
  dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                    (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
  while (dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->tx_dma_q);
      if (!skb) break;

      /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
      /* Only every other DLE carries the skb data mapping; the
         companion entry is the CPCS trailer. */
      if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
          dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
                           DMA_TO_DEVICE);
      }
      vcc = ATM_SKB(skb)->vcc;
      if (!vcc) {
          printk("tx_dle_intr: vcc is null\n");
          spin_unlock_irqrestore(&iadev->tx_lock, flags);
          dev_kfree_skb_any(skb);

          return;
      }
      iavcc = INPH_IA_VCC(vcc);
      if (!iavcc) {
          printk("tx_dle_intr: iavcc is null\n");
          spin_unlock_irqrestore(&iadev->tx_lock, flags);
          dev_kfree_skb_any(skb);
          return;
      }
      /* Fast VCs complete immediately; slow (rate-limited) VCs keep the
         skb around so completion can be throttled. */
      if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
         if ((vcc->pop) && (skb->len != 0))
         {
             vcc->pop(vcc, skb);
         }
         else {
             dev_kfree_skb_any(skb);
         }
      }
      else { /* Hold the rate-limited skb for flow control */
         IA_SKB_STATE(skb) |= IA_DLED;
         skb_queue_tail(&iavcc->txing_skb, skb);
      }
      IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
      /* Advance around the circular DLE ring. */
      if (++dle == iadev->tx_dle_q.end)
           dle = iadev->tx_dle_q.start;
  }
  iadev->tx_dle_q.read = dle;
  spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1745*4882a593Smuzhiyun
/*
 * open_tx - enable transmission on a VCC.
 *
 * Validates the requested traffic class against the PHY and the
 * configured SDU size, computes the effective PCR and completion
 * timeout, sizes the socket send buffer for rate-limited VCs, then
 * programs the main and extended VC table entries for UBR, ABR or CBR
 * operation.  Returns 0 on success or a negative errno / -1 on
 * parameter or resource problems.
 */
static int open_tx(struct atm_vcc *vcc)
{
   struct ia_vcc *ia_vcc;
   IADEV *iadev;
   struct main_vc *vc;
   struct ext_vc *evc;
   int ret;
   IF_EVENT(printk("iadev: open_tx entered vcc->vci =  %d\n", vcc->vci);)
   if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
   iadev = INPH_IA_DEV(vcc->dev);

   /* The 25 Mbit front end supports neither ABR nor CBR. */
   if (iadev->phy_type & FE_25MBIT_PHY) {
      if (vcc->qos.txtp.traffic_class == ATM_ABR) {
          printk("IA:  ABR not support\n");
          return -EINVAL;
      }
      if (vcc->qos.txtp.traffic_class == ATM_CBR) {
          printk("IA:  CBR not support\n");
          return -EINVAL;
      }
   }
   ia_vcc =  INPH_IA_VCC(vcc);
   memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
   /* Each PDU must fit in one TX buffer together with its CPCS trailer. */
   if (vcc->qos.txtp.max_sdu >
            (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
       printk("IA:  SDU size over (%d) the configured SDU size %d\n",
	      vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
       vcc->dev_data = NULL;
       kfree(ia_vcc);
       return -EINVAL;
   }
   ia_vcc->vc_desc_cnt = 0;
   ia_vcc->txing = 1;

   /* find pcr */
   /* Resolve the effective PCR: unspecified/unbounded requests fall back
      to the line rate, and the result is clamped to the line rate. */
   if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
      vcc->qos.txtp.pcr = iadev->LineRate;
   else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
      vcc->qos.txtp.pcr = iadev->LineRate;
   else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
      vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
   if (vcc->qos.txtp.pcr > iadev->LineRate)
             vcc->qos.txtp.pcr = iadev->LineRate;
   ia_vcc->pcr = vcc->qos.txtp.pcr;

   /* Pick a completion timeout inversely proportional to the rate;
      slower VCs are given longer to drain. */
   if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
   else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
   else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
   else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
   if (ia_vcc->pcr < iadev->rate_limit)
       skb_queue_head_init (&ia_vcc->txing_skb);
   /* Rate-limited VCs get a send buffer scaled to the SDU size so the
      socket cannot queue unbounded data ahead of the slow VC. */
   if (ia_vcc->pcr < iadev->rate_limit) {
       struct sock *sk = sk_atm(vcc);

       if (vcc->qos.txtp.max_sdu != 0) {
           if (ia_vcc->pcr > 60000)
               sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
           else if (ia_vcc->pcr > 2000)
               sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
           else
             sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
       }
       else
         sk->sk_sndbuf = 24576;
   }

   vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
   evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
   vc += vcc->vci;
   evc += vcc->vci;
   memset((caddr_t)vc, 0, sizeof(*vc));
   memset((caddr_t)evc, 0, sizeof(*evc));

   /* store the most significant 4 bits of vci as the last 4 bits
      of first part of atm header.
      store the last 12 bits of vci as first 12 bits of the second
      part of the atm header.
   */
   evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
   evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

   /* check the following for different traffic classes */
   if (vcc->qos.txtp.traffic_class == ATM_UBR)
   {
	vc->type = UBR;
        vc->status = CRC_APPEND;
	vc->acr = cellrate_to_float(iadev->LineRate);
        if (vcc->qos.txtp.pcr > 0)
           vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
        IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                             vcc->qos.txtp.max_pcr,vc->acr);)
   }
   else if (vcc->qos.txtp.traffic_class == ATM_ABR)
   {       srv_cls_param_t srv_p;
	   IF_ABR(printk("Tx ABR VCC\n");)
           init_abr_vc(iadev, &srv_p);
           /* Copy every caller-supplied ABR parameter over the defaults. */
           if (vcc->qos.txtp.pcr > 0)
              srv_p.pcr = vcc->qos.txtp.pcr;
           if (vcc->qos.txtp.min_pcr > 0) {
              /* Admission control: the sum of guaranteed rates must not
                 exceed the line rate. */
              int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
              if (tmpsum > iadev->LineRate)
                  return -EBUSY;
              srv_p.mcr = vcc->qos.txtp.min_pcr;
              iadev->sum_mcr += vcc->qos.txtp.min_pcr;
           }
           else srv_p.mcr = 0;
           if (vcc->qos.txtp.icr)
              srv_p.icr = vcc->qos.txtp.icr;
           if (vcc->qos.txtp.tbe)
              srv_p.tbe = vcc->qos.txtp.tbe;
           if (vcc->qos.txtp.frtt)
              srv_p.frtt = vcc->qos.txtp.frtt;
           if (vcc->qos.txtp.rif)
              srv_p.rif = vcc->qos.txtp.rif;
           if (vcc->qos.txtp.rdf)
              srv_p.rdf = vcc->qos.txtp.rdf;
           if (vcc->qos.txtp.nrm_pres)
              srv_p.nrm = vcc->qos.txtp.nrm;
           if (vcc->qos.txtp.trm_pres)
              srv_p.trm = vcc->qos.txtp.trm;
           if (vcc->qos.txtp.adtf_pres)
              srv_p.adtf = vcc->qos.txtp.adtf;
           if (vcc->qos.txtp.cdf_pres)
              srv_p.cdf = vcc->qos.txtp.cdf;
           /* The initial cell rate can never exceed the peak rate. */
           if (srv_p.icr > srv_p.pcr)
              srv_p.icr = srv_p.pcr;
           IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
                                                      srv_p.pcr, srv_p.mcr);)
	   ia_open_abr_vc(iadev, &srv_p, vcc, 1);
   } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA:  CBR not support\n");
               return -EINVAL;
           }
           if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
              IF_CBR(printk("PCR is not available\n");)
              return -1;
           }
           vc->type = CBR;
           vc->status = CRC_APPEND;
           if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
               return ret;
           }
   } else {
           printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
   }

   iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
   IF_EVENT(printk("ia open_tx returning \n");)
   return 0;
}
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun
/*
 * tx_init - one-time initialization of the transmit (segmentation) side.
 *
 * Allocates the TX DLE ring and per-descriptor CPCS trailer buffers,
 * programs the segmentation-side control memory layout (buffer
 * descriptors, TCQ, PRQ, CBR/UBR/ABR schedule tables, VC tables) and
 * brings the segmentation engine online.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (all
 * partially acquired resources are unwound via the goto chain).
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
	u_short tmp16;
	u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
	spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
		readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate the TX DLE ring: 4K (boundary aligned) bytes of
	 * coherent DMA memory shared with the adapter. */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr) {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	/* Ring starts empty: read == write == start. */
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* Mask all segmentation interrupts, take the engine offline and
	 * reset it before reprogramming control memory. */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
	/* Host-side pointers into segmentation RAM for the VC tables and
	 * ABR schedule table (offsets scale with the board's memory size). */
	iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
	iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
	iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	   Buffer descr	0x0000 (128 - 4K)
	   Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	   CBR Table	0x1800 (as needed) - 6K
	   UBR Table	0x3000 (1K - 4K) - 12K
	   UBR Wait queue 0x4000 (1K - 4K) - 16K
	   ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	   extended VC	0x6000 (1K - 8K) - 24K
	   VC Table	0x8000 (1K - 32K) - 32K

	   Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	   and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* Initialize each entry in the buffer descriptor table.
	 * Entry 0 is zeroed but otherwise unused; descriptors are
	 * numbered 1..num_tx_desc throughout the driver. */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* One host-side CPCS trailer buffer per TX descriptor, each
	 * DMA-mapped for device reads (AAL5 trailer appended to packets). */
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
	if (!iadev->tx_buf) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_dle;
	}
	for (i= 0; i< iadev->num_tx_desc; i++)
	{
		struct cpcs_trailer *cpcs;

		cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
		if(!cpcs) {
			printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
			goto err_free_tx_bufs;
		}
		iadev->tx_buf[i].cpcs = cpcs;
		iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
							   cpcs,
							   sizeof(*cpcs),
							   DMA_TO_DEVICE);
	}
	/* Per-descriptor bookkeeping table (timestamps/VCC back-pointers). */
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue: sized for 2 * num_tx_desc 16-bit
	 * entries; write pointer starts one descriptor-set ahead of read. */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue: same 2 * num_tx_desc entry sizing, starts
	 * empty (read == write). */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors.
	 * NOTE(review): the code writes 0 into every entry, not descriptor 1
	 * as the comment below claims — left as-is, but worth confirming. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0; /* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
	IF_INIT(printk("Start CBR Init\n");)
#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
	writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
	tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
	IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
	writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

	IF_INIT(printk("value in register = 0x%x\n",
		readw(iadev->seg_reg+CBR_PTR_BASE));)
	/* CBR table begin/end are expressed in 16-bit words (hence >> 1). */
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
	IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
		readw(iadev->seg_reg+CBR_TAB_BEG));)
	writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_END);
	IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
		iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
	IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
		readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
		readw(iadev->seg_reg+CBR_TAB_END+1));)

	/* Zero the CBR scheduling table (num_vc * 6 bytes of device RAM)
	 * and reset the host-side CBR accounting. */
	memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
		  0, iadev->num_vc*6);
	iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
	iadev->CbrEntryPt = 0;
	iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
	iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - UBR Table size is 4K
	   - UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/

	/* Derive the VC-table-size select field: vcsize_sel counts how many
	 * halvings take 8K down to num_vc.
	 * NOTE(review): loops forever if num_vc is not a power of two <= 8K;
	 * presumably guaranteed by ia_init (1024 or 4096) — confirm. */
	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}

	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
	i = EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
	       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - ABR Table size is 2K
	   - ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
	/* Zero the main and extended VC tables in device RAM and build the
	 * per-VC host-side test table (rate-tracking state, starts as UBR). */
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
	if (!iadev->testTable) {
		printk("Get freepage failed\n");
		goto err_free_desc_tbl;
	}
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
					      GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register: 25 Mbit PHYs get a fixed rate and no ABR. */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		writew(RATE25, iadev->seg_reg+MAXRATE);
		writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
		writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
		writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Registers to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 — take the segmentation engine online. */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;

	/* Error unwinding: each label frees everything acquired after the
	 * previous label's resources. Falling from err_free_all_tx_bufs into
	 * err_free_tx_bufs (with i = num_tx_desc) frees every mapped cpcs. */
err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2221*4882a593Smuzhiyun
2222*4882a593Smuzhiyun static irqreturn_t ia_int(int irq, void *dev_id)
2223*4882a593Smuzhiyun {
2224*4882a593Smuzhiyun struct atm_dev *dev;
2225*4882a593Smuzhiyun IADEV *iadev;
2226*4882a593Smuzhiyun unsigned int status;
2227*4882a593Smuzhiyun int handled = 0;
2228*4882a593Smuzhiyun
2229*4882a593Smuzhiyun dev = dev_id;
2230*4882a593Smuzhiyun iadev = INPH_IA_DEV(dev);
2231*4882a593Smuzhiyun while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2232*4882a593Smuzhiyun {
2233*4882a593Smuzhiyun handled = 1;
2234*4882a593Smuzhiyun IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2235*4882a593Smuzhiyun if (status & STAT_REASSINT)
2236*4882a593Smuzhiyun {
2237*4882a593Smuzhiyun /* do something */
2238*4882a593Smuzhiyun IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2239*4882a593Smuzhiyun rx_intr(dev);
2240*4882a593Smuzhiyun }
2241*4882a593Smuzhiyun if (status & STAT_DLERINT)
2242*4882a593Smuzhiyun {
2243*4882a593Smuzhiyun /* Clear this bit by writing a 1 to it. */
2244*4882a593Smuzhiyun writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2245*4882a593Smuzhiyun rx_dle_intr(dev);
2246*4882a593Smuzhiyun }
2247*4882a593Smuzhiyun if (status & STAT_SEGINT)
2248*4882a593Smuzhiyun {
2249*4882a593Smuzhiyun /* do something */
2250*4882a593Smuzhiyun IF_EVENT(printk("IA: tx_intr \n");)
2251*4882a593Smuzhiyun tx_intr(dev);
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun if (status & STAT_DLETINT)
2254*4882a593Smuzhiyun {
2255*4882a593Smuzhiyun writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2256*4882a593Smuzhiyun tx_dle_intr(dev);
2257*4882a593Smuzhiyun }
2258*4882a593Smuzhiyun if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2259*4882a593Smuzhiyun {
2260*4882a593Smuzhiyun if (status & STAT_FEINT)
2261*4882a593Smuzhiyun ia_frontend_intr(iadev);
2262*4882a593Smuzhiyun }
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun return IRQ_RETVAL(handled);
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun
2267*4882a593Smuzhiyun
2268*4882a593Smuzhiyun
2269*4882a593Smuzhiyun /*----------------------------- entries --------------------------------*/
2270*4882a593Smuzhiyun static int get_esi(struct atm_dev *dev)
2271*4882a593Smuzhiyun {
2272*4882a593Smuzhiyun IADEV *iadev;
2273*4882a593Smuzhiyun int i;
2274*4882a593Smuzhiyun u32 mac1;
2275*4882a593Smuzhiyun u16 mac2;
2276*4882a593Smuzhiyun
2277*4882a593Smuzhiyun iadev = INPH_IA_DEV(dev);
2278*4882a593Smuzhiyun mac1 = cpu_to_be32(le32_to_cpu(readl(
2279*4882a593Smuzhiyun iadev->reg+IPHASE5575_MAC1)));
2280*4882a593Smuzhiyun mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2281*4882a593Smuzhiyun IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2282*4882a593Smuzhiyun for (i=0; i<MAC1_LEN; i++)
2283*4882a593Smuzhiyun dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2284*4882a593Smuzhiyun
2285*4882a593Smuzhiyun for (i=0; i<MAC2_LEN; i++)
2286*4882a593Smuzhiyun dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2287*4882a593Smuzhiyun return 0;
2288*4882a593Smuzhiyun }
2289*4882a593Smuzhiyun
2290*4882a593Smuzhiyun static int reset_sar(struct atm_dev *dev)
2291*4882a593Smuzhiyun {
2292*4882a593Smuzhiyun IADEV *iadev;
2293*4882a593Smuzhiyun int i, error = 1;
2294*4882a593Smuzhiyun unsigned int pci[64];
2295*4882a593Smuzhiyun
2296*4882a593Smuzhiyun iadev = INPH_IA_DEV(dev);
2297*4882a593Smuzhiyun for(i=0; i<64; i++)
2298*4882a593Smuzhiyun if ((error = pci_read_config_dword(iadev->pci,
2299*4882a593Smuzhiyun i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2300*4882a593Smuzhiyun return error;
2301*4882a593Smuzhiyun writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2302*4882a593Smuzhiyun for(i=0; i<64; i++)
2303*4882a593Smuzhiyun if ((error = pci_write_config_dword(iadev->pci,
2304*4882a593Smuzhiyun i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2305*4882a593Smuzhiyun return error;
2306*4882a593Smuzhiyun udelay(5);
2307*4882a593Smuzhiyun return 0;
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun
2310*4882a593Smuzhiyun
/*
 * ia_init - probe-time initialization of one adapter.
 *
 * Reads PCI configuration, deduces the board variant (VC count and
 * control-memory size) from the BAR0 length, ioremaps the board,
 * records the register/RAM sub-region pointers in the IADEV, reads
 * the MAC address and resets the SAR.
 *
 * Returns 0 on success, a negative errno on most failures, or 1 if
 * the SAR reset fails (note: positive — callers only test non-zero).
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
		       dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
		       dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* The BAR0 mapping size identifies the board variant:
	 * 1 MB -> 4K-VC board, 256 KB -> 1K-VC board. */
	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	if (iadev->pci_map_size == 0x100000){
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* Map the board's register/RAM window into kernel virtual space. */
	base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
		       dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
		       dev->number, iadev->pci->revision, base, iadev->irq);)

	/* Record the window and carve out the fixed sub-regions. */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers (phy and dma share a base). */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAM and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
		       iadev->reg,iadev->seg_reg,iadev->reass_reg,
		       iadev->phy, iadev->ram, iadev->seg_ram,
		       iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
		iounmap(iadev->base);
		return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
		iounmap(iadev->base);
		printk("IA: reset SAR fail, please try again\n");
		return 1;
	}
	return 0;
}
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun static void ia_update_stats(IADEV *iadev) {
2425*4882a593Smuzhiyun if (!iadev->carrier_detect)
2426*4882a593Smuzhiyun return;
2427*4882a593Smuzhiyun iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2428*4882a593Smuzhiyun iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2429*4882a593Smuzhiyun iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2430*4882a593Smuzhiyun iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2431*4882a593Smuzhiyun iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2432*4882a593Smuzhiyun iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2433*4882a593Smuzhiyun return;
2434*4882a593Smuzhiyun }
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun static void ia_led_timer(struct timer_list *unused) {
2437*4882a593Smuzhiyun unsigned long flags;
2438*4882a593Smuzhiyun static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2439*4882a593Smuzhiyun u_char i;
2440*4882a593Smuzhiyun static u32 ctrl_reg;
2441*4882a593Smuzhiyun for (i = 0; i < iadev_count; i++) {
2442*4882a593Smuzhiyun if (ia_dev[i]) {
2443*4882a593Smuzhiyun ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2444*4882a593Smuzhiyun if (blinking[i] == 0) {
2445*4882a593Smuzhiyun blinking[i]++;
2446*4882a593Smuzhiyun ctrl_reg &= (~CTRL_LED);
2447*4882a593Smuzhiyun writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2448*4882a593Smuzhiyun ia_update_stats(ia_dev[i]);
2449*4882a593Smuzhiyun }
2450*4882a593Smuzhiyun else {
2451*4882a593Smuzhiyun blinking[i] = 0;
2452*4882a593Smuzhiyun ctrl_reg |= CTRL_LED;
2453*4882a593Smuzhiyun writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2454*4882a593Smuzhiyun spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2455*4882a593Smuzhiyun if (ia_dev[i]->close_pending)
2456*4882a593Smuzhiyun wake_up(&ia_dev[i]->close_wait);
2457*4882a593Smuzhiyun ia_tx_poll(ia_dev[i]);
2458*4882a593Smuzhiyun spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2459*4882a593Smuzhiyun }
2460*4882a593Smuzhiyun }
2461*4882a593Smuzhiyun }
2462*4882a593Smuzhiyun mod_timer(&ia_timer, jiffies + HZ / 4);
2463*4882a593Smuzhiyun return;
2464*4882a593Smuzhiyun }
2465*4882a593Smuzhiyun
2466*4882a593Smuzhiyun static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2467*4882a593Smuzhiyun unsigned long addr)
2468*4882a593Smuzhiyun {
2469*4882a593Smuzhiyun writel(value, INPH_IA_DEV(dev)->phy+addr);
2470*4882a593Smuzhiyun }
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2473*4882a593Smuzhiyun {
2474*4882a593Smuzhiyun return readl(INPH_IA_DEV(dev)->phy+addr);
2475*4882a593Smuzhiyun }
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun static void ia_free_tx(IADEV *iadev)
2478*4882a593Smuzhiyun {
2479*4882a593Smuzhiyun int i;
2480*4882a593Smuzhiyun
2481*4882a593Smuzhiyun kfree(iadev->desc_tbl);
2482*4882a593Smuzhiyun for (i = 0; i < iadev->num_vc; i++)
2483*4882a593Smuzhiyun kfree(iadev->testTable[i]);
2484*4882a593Smuzhiyun kfree(iadev->testTable);
2485*4882a593Smuzhiyun for (i = 0; i < iadev->num_tx_desc; i++) {
2486*4882a593Smuzhiyun struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2487*4882a593Smuzhiyun
2488*4882a593Smuzhiyun dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2489*4882a593Smuzhiyun sizeof(*desc->cpcs), DMA_TO_DEVICE);
2490*4882a593Smuzhiyun kfree(desc->cpcs);
2491*4882a593Smuzhiyun }
2492*4882a593Smuzhiyun kfree(iadev->tx_buf);
2493*4882a593Smuzhiyun dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2494*4882a593Smuzhiyun iadev->tx_dle_dma);
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun
/*
 * ia_free_rx - release the receive-side resources: the rx_open VCC
 * lookup table and the RX DLE ring (presumably allocated during RX
 * init, which is outside this view — confirm pairing there).
 */
static void ia_free_rx(IADEV *iadev)
{
	kfree(iadev->rx_open);
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
}
2503*4882a593Smuzhiyun
2504*4882a593Smuzhiyun static int ia_start(struct atm_dev *dev)
2505*4882a593Smuzhiyun {
2506*4882a593Smuzhiyun IADEV *iadev;
2507*4882a593Smuzhiyun int error;
2508*4882a593Smuzhiyun unsigned char phy;
2509*4882a593Smuzhiyun u32 ctrl_reg;
2510*4882a593Smuzhiyun IF_EVENT(printk(">ia_start\n");)
2511*4882a593Smuzhiyun iadev = INPH_IA_DEV(dev);
2512*4882a593Smuzhiyun if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2513*4882a593Smuzhiyun printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2514*4882a593Smuzhiyun dev->number, iadev->irq);
2515*4882a593Smuzhiyun error = -EAGAIN;
2516*4882a593Smuzhiyun goto err_out;
2517*4882a593Smuzhiyun }
2518*4882a593Smuzhiyun /* @@@ should release IRQ on error */
2519*4882a593Smuzhiyun /* enabling memory + master */
2520*4882a593Smuzhiyun if ((error = pci_write_config_word(iadev->pci,
2521*4882a593Smuzhiyun PCI_COMMAND,
2522*4882a593Smuzhiyun PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2523*4882a593Smuzhiyun {
2524*4882a593Smuzhiyun printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2525*4882a593Smuzhiyun "master (0x%x)\n",dev->number, error);
2526*4882a593Smuzhiyun error = -EIO;
2527*4882a593Smuzhiyun goto err_free_irq;
2528*4882a593Smuzhiyun }
2529*4882a593Smuzhiyun udelay(10);
2530*4882a593Smuzhiyun
2531*4882a593Smuzhiyun /* Maybe we should reset the front end, initialize Bus Interface Control
2532*4882a593Smuzhiyun Registers and see. */
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun IF_INIT(printk("Bus ctrl reg: %08x\n",
2535*4882a593Smuzhiyun readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2536*4882a593Smuzhiyun ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2537*4882a593Smuzhiyun ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2538*4882a593Smuzhiyun | CTRL_B8
2539*4882a593Smuzhiyun | CTRL_B16
2540*4882a593Smuzhiyun | CTRL_B32
2541*4882a593Smuzhiyun | CTRL_B48
2542*4882a593Smuzhiyun | CTRL_B64
2543*4882a593Smuzhiyun | CTRL_B128
2544*4882a593Smuzhiyun | CTRL_ERRMASK
2545*4882a593Smuzhiyun | CTRL_DLETMASK /* shud be removed l8r */
2546*4882a593Smuzhiyun | CTRL_DLERMASK
2547*4882a593Smuzhiyun | CTRL_SEGMASK
2548*4882a593Smuzhiyun | CTRL_REASSMASK
2549*4882a593Smuzhiyun | CTRL_FEMASK
2550*4882a593Smuzhiyun | CTRL_CSPREEMPT;
2551*4882a593Smuzhiyun
2552*4882a593Smuzhiyun writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2555*4882a593Smuzhiyun readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2556*4882a593Smuzhiyun printk("Bus status reg after init: %08x\n",
2557*4882a593Smuzhiyun readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun ia_hw_type(iadev);
2560*4882a593Smuzhiyun error = tx_init(dev);
2561*4882a593Smuzhiyun if (error)
2562*4882a593Smuzhiyun goto err_free_irq;
2563*4882a593Smuzhiyun error = rx_init(dev);
2564*4882a593Smuzhiyun if (error)
2565*4882a593Smuzhiyun goto err_free_tx;
2566*4882a593Smuzhiyun
2567*4882a593Smuzhiyun ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2568*4882a593Smuzhiyun writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2569*4882a593Smuzhiyun IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2570*4882a593Smuzhiyun readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2571*4882a593Smuzhiyun phy = 0; /* resolve compiler complaint */
2572*4882a593Smuzhiyun IF_INIT (
2573*4882a593Smuzhiyun if ((phy=ia_phy_get(dev,0)) == 0x30)
2574*4882a593Smuzhiyun printk("IA: pm5346,rev.%d\n",phy&0x0f);
2575*4882a593Smuzhiyun else
2576*4882a593Smuzhiyun printk("IA: utopia,rev.%0x\n",phy);)
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun if (iadev->phy_type & FE_25MBIT_PHY)
2579*4882a593Smuzhiyun ia_mb25_init(iadev);
2580*4882a593Smuzhiyun else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2581*4882a593Smuzhiyun ia_suni_pm7345_init(iadev);
2582*4882a593Smuzhiyun else {
2583*4882a593Smuzhiyun error = suni_init(dev);
2584*4882a593Smuzhiyun if (error)
2585*4882a593Smuzhiyun goto err_free_rx;
2586*4882a593Smuzhiyun if (dev->phy->start) {
2587*4882a593Smuzhiyun error = dev->phy->start(dev);
2588*4882a593Smuzhiyun if (error)
2589*4882a593Smuzhiyun goto err_free_rx;
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun /* Get iadev->carrier_detect status */
2592*4882a593Smuzhiyun ia_frontend_intr(iadev);
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun return 0;
2595*4882a593Smuzhiyun
2596*4882a593Smuzhiyun err_free_rx:
2597*4882a593Smuzhiyun ia_free_rx(iadev);
2598*4882a593Smuzhiyun err_free_tx:
2599*4882a593Smuzhiyun ia_free_tx(iadev);
2600*4882a593Smuzhiyun err_free_irq:
2601*4882a593Smuzhiyun free_irq(iadev->irq, dev);
2602*4882a593Smuzhiyun err_out:
2603*4882a593Smuzhiyun return error;
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun static void ia_close(struct atm_vcc *vcc)
2607*4882a593Smuzhiyun {
2608*4882a593Smuzhiyun DEFINE_WAIT(wait);
2609*4882a593Smuzhiyun u16 *vc_table;
2610*4882a593Smuzhiyun IADEV *iadev;
2611*4882a593Smuzhiyun struct ia_vcc *ia_vcc;
2612*4882a593Smuzhiyun struct sk_buff *skb = NULL;
2613*4882a593Smuzhiyun struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2614*4882a593Smuzhiyun unsigned long closetime, flags;
2615*4882a593Smuzhiyun
2616*4882a593Smuzhiyun iadev = INPH_IA_DEV(vcc->dev);
2617*4882a593Smuzhiyun ia_vcc = INPH_IA_VCC(vcc);
2618*4882a593Smuzhiyun if (!ia_vcc) return;
2619*4882a593Smuzhiyun
2620*4882a593Smuzhiyun IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
2621*4882a593Smuzhiyun ia_vcc->vc_desc_cnt,vcc->vci);)
2622*4882a593Smuzhiyun clear_bit(ATM_VF_READY,&vcc->flags);
2623*4882a593Smuzhiyun skb_queue_head_init (&tmp_tx_backlog);
2624*4882a593Smuzhiyun skb_queue_head_init (&tmp_vcc_backlog);
2625*4882a593Smuzhiyun if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2626*4882a593Smuzhiyun iadev->close_pending++;
2627*4882a593Smuzhiyun prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2628*4882a593Smuzhiyun schedule_timeout(msecs_to_jiffies(500));
2629*4882a593Smuzhiyun finish_wait(&iadev->timeout_wait, &wait);
2630*4882a593Smuzhiyun spin_lock_irqsave(&iadev->tx_lock, flags);
2631*4882a593Smuzhiyun while((skb = skb_dequeue(&iadev->tx_backlog))) {
2632*4882a593Smuzhiyun if (ATM_SKB(skb)->vcc == vcc){
2633*4882a593Smuzhiyun if (vcc->pop) vcc->pop(vcc, skb);
2634*4882a593Smuzhiyun else dev_kfree_skb_any(skb);
2635*4882a593Smuzhiyun }
2636*4882a593Smuzhiyun else
2637*4882a593Smuzhiyun skb_queue_tail(&tmp_tx_backlog, skb);
2638*4882a593Smuzhiyun }
2639*4882a593Smuzhiyun while((skb = skb_dequeue(&tmp_tx_backlog)))
2640*4882a593Smuzhiyun skb_queue_tail(&iadev->tx_backlog, skb);
2641*4882a593Smuzhiyun IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2642*4882a593Smuzhiyun closetime = 300000 / ia_vcc->pcr;
2643*4882a593Smuzhiyun if (closetime == 0)
2644*4882a593Smuzhiyun closetime = 1;
2645*4882a593Smuzhiyun spin_unlock_irqrestore(&iadev->tx_lock, flags);
2646*4882a593Smuzhiyun wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2647*4882a593Smuzhiyun spin_lock_irqsave(&iadev->tx_lock, flags);
2648*4882a593Smuzhiyun iadev->close_pending--;
2649*4882a593Smuzhiyun iadev->testTable[vcc->vci]->lastTime = 0;
2650*4882a593Smuzhiyun iadev->testTable[vcc->vci]->fract = 0;
2651*4882a593Smuzhiyun iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2652*4882a593Smuzhiyun if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2653*4882a593Smuzhiyun if (vcc->qos.txtp.min_pcr > 0)
2654*4882a593Smuzhiyun iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2655*4882a593Smuzhiyun }
2656*4882a593Smuzhiyun if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2657*4882a593Smuzhiyun ia_vcc = INPH_IA_VCC(vcc);
2658*4882a593Smuzhiyun iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2659*4882a593Smuzhiyun ia_cbrVc_close (vcc);
2660*4882a593Smuzhiyun }
2661*4882a593Smuzhiyun spin_unlock_irqrestore(&iadev->tx_lock, flags);
2662*4882a593Smuzhiyun }
2663*4882a593Smuzhiyun
2664*4882a593Smuzhiyun if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2665*4882a593Smuzhiyun // reset reass table
2666*4882a593Smuzhiyun vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2667*4882a593Smuzhiyun vc_table += vcc->vci;
2668*4882a593Smuzhiyun *vc_table = NO_AAL5_PKT;
2669*4882a593Smuzhiyun // reset vc table
2670*4882a593Smuzhiyun vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2671*4882a593Smuzhiyun vc_table += vcc->vci;
2672*4882a593Smuzhiyun *vc_table = (vcc->vci << 6) | 15;
2673*4882a593Smuzhiyun if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2674*4882a593Smuzhiyun struct abr_vc_table __iomem *abr_vc_table =
2675*4882a593Smuzhiyun (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2676*4882a593Smuzhiyun abr_vc_table += vcc->vci;
2677*4882a593Smuzhiyun abr_vc_table->rdf = 0x0003;
2678*4882a593Smuzhiyun abr_vc_table->air = 0x5eb1;
2679*4882a593Smuzhiyun }
2680*4882a593Smuzhiyun // Drain the packets
2681*4882a593Smuzhiyun rx_dle_intr(vcc->dev);
2682*4882a593Smuzhiyun iadev->rx_open[vcc->vci] = NULL;
2683*4882a593Smuzhiyun }
2684*4882a593Smuzhiyun kfree(INPH_IA_VCC(vcc));
2685*4882a593Smuzhiyun ia_vcc = NULL;
2686*4882a593Smuzhiyun vcc->dev_data = NULL;
2687*4882a593Smuzhiyun clear_bit(ATM_VF_ADDR,&vcc->flags);
2688*4882a593Smuzhiyun return;
2689*4882a593Smuzhiyun }
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun static int ia_open(struct atm_vcc *vcc)
2692*4882a593Smuzhiyun {
2693*4882a593Smuzhiyun struct ia_vcc *ia_vcc;
2694*4882a593Smuzhiyun int error;
2695*4882a593Smuzhiyun if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2696*4882a593Smuzhiyun {
2697*4882a593Smuzhiyun IF_EVENT(printk("ia: not partially allocated resources\n");)
2698*4882a593Smuzhiyun vcc->dev_data = NULL;
2699*4882a593Smuzhiyun }
2700*4882a593Smuzhiyun if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2701*4882a593Smuzhiyun {
2702*4882a593Smuzhiyun IF_EVENT(printk("iphase open: unspec part\n");)
2703*4882a593Smuzhiyun set_bit(ATM_VF_ADDR,&vcc->flags);
2704*4882a593Smuzhiyun }
2705*4882a593Smuzhiyun if (vcc->qos.aal != ATM_AAL5)
2706*4882a593Smuzhiyun return -EINVAL;
2707*4882a593Smuzhiyun IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2708*4882a593Smuzhiyun vcc->dev->number, vcc->vpi, vcc->vci);)
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun /* Device dependent initialization */
2711*4882a593Smuzhiyun ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2712*4882a593Smuzhiyun if (!ia_vcc) return -ENOMEM;
2713*4882a593Smuzhiyun vcc->dev_data = ia_vcc;
2714*4882a593Smuzhiyun
2715*4882a593Smuzhiyun if ((error = open_rx(vcc)))
2716*4882a593Smuzhiyun {
2717*4882a593Smuzhiyun IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2718*4882a593Smuzhiyun ia_close(vcc);
2719*4882a593Smuzhiyun return error;
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun
2722*4882a593Smuzhiyun if ((error = open_tx(vcc)))
2723*4882a593Smuzhiyun {
2724*4882a593Smuzhiyun IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2725*4882a593Smuzhiyun ia_close(vcc);
2726*4882a593Smuzhiyun return error;
2727*4882a593Smuzhiyun }
2728*4882a593Smuzhiyun
2729*4882a593Smuzhiyun set_bit(ATM_VF_READY,&vcc->flags);
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun #if 0
2732*4882a593Smuzhiyun {
2733*4882a593Smuzhiyun static u8 first = 1;
2734*4882a593Smuzhiyun if (first) {
2735*4882a593Smuzhiyun ia_timer.expires = jiffies + 3*HZ;
2736*4882a593Smuzhiyun add_timer(&ia_timer);
2737*4882a593Smuzhiyun first = 0;
2738*4882a593Smuzhiyun }
2739*4882a593Smuzhiyun }
2740*4882a593Smuzhiyun #endif
2741*4882a593Smuzhiyun IF_EVENT(printk("ia open returning\n");)
2742*4882a593Smuzhiyun return 0;
2743*4882a593Smuzhiyun }
2744*4882a593Smuzhiyun
2745*4882a593Smuzhiyun static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2746*4882a593Smuzhiyun {
2747*4882a593Smuzhiyun IF_EVENT(printk(">ia_change_qos\n");)
2748*4882a593Smuzhiyun return 0;
2749*4882a593Smuzhiyun }
2750*4882a593Smuzhiyun
2751*4882a593Smuzhiyun static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2752*4882a593Smuzhiyun {
2753*4882a593Smuzhiyun IA_CMDBUF ia_cmds;
2754*4882a593Smuzhiyun IADEV *iadev;
2755*4882a593Smuzhiyun int i, board;
2756*4882a593Smuzhiyun u16 __user *tmps;
2757*4882a593Smuzhiyun IF_EVENT(printk(">ia_ioctl\n");)
2758*4882a593Smuzhiyun if (cmd != IA_CMD) {
2759*4882a593Smuzhiyun if (!dev->phy->ioctl) return -EINVAL;
2760*4882a593Smuzhiyun return dev->phy->ioctl(dev,cmd,arg);
2761*4882a593Smuzhiyun }
2762*4882a593Smuzhiyun if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2763*4882a593Smuzhiyun board = ia_cmds.status;
2764*4882a593Smuzhiyun
2765*4882a593Smuzhiyun if ((board < 0) || (board > iadev_count))
2766*4882a593Smuzhiyun board = 0;
2767*4882a593Smuzhiyun board = array_index_nospec(board, iadev_count + 1);
2768*4882a593Smuzhiyun
2769*4882a593Smuzhiyun iadev = ia_dev[board];
2770*4882a593Smuzhiyun switch (ia_cmds.cmd) {
2771*4882a593Smuzhiyun case MEMDUMP:
2772*4882a593Smuzhiyun {
2773*4882a593Smuzhiyun switch (ia_cmds.sub_cmd) {
2774*4882a593Smuzhiyun case MEMDUMP_SEGREG:
2775*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2776*4882a593Smuzhiyun tmps = (u16 __user *)ia_cmds.buf;
2777*4882a593Smuzhiyun for(i=0; i<0x80; i+=2, tmps++)
2778*4882a593Smuzhiyun if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2779*4882a593Smuzhiyun ia_cmds.status = 0;
2780*4882a593Smuzhiyun ia_cmds.len = 0x80;
2781*4882a593Smuzhiyun break;
2782*4882a593Smuzhiyun case MEMDUMP_REASSREG:
2783*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2784*4882a593Smuzhiyun tmps = (u16 __user *)ia_cmds.buf;
2785*4882a593Smuzhiyun for(i=0; i<0x80; i+=2, tmps++)
2786*4882a593Smuzhiyun if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2787*4882a593Smuzhiyun ia_cmds.status = 0;
2788*4882a593Smuzhiyun ia_cmds.len = 0x80;
2789*4882a593Smuzhiyun break;
2790*4882a593Smuzhiyun case MEMDUMP_FFL:
2791*4882a593Smuzhiyun {
2792*4882a593Smuzhiyun ia_regs_t *regs_local;
2793*4882a593Smuzhiyun ffredn_t *ffL;
2794*4882a593Smuzhiyun rfredn_t *rfL;
2795*4882a593Smuzhiyun
2796*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2797*4882a593Smuzhiyun regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2798*4882a593Smuzhiyun if (!regs_local) return -ENOMEM;
2799*4882a593Smuzhiyun ffL = ®s_local->ffredn;
2800*4882a593Smuzhiyun rfL = ®s_local->rfredn;
2801*4882a593Smuzhiyun /* Copy real rfred registers into the local copy */
2802*4882a593Smuzhiyun for (i=0; i<(sizeof (rfredn_t))/4; i++)
2803*4882a593Smuzhiyun ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2804*4882a593Smuzhiyun /* Copy real ffred registers into the local copy */
2805*4882a593Smuzhiyun for (i=0; i<(sizeof (ffredn_t))/4; i++)
2806*4882a593Smuzhiyun ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2807*4882a593Smuzhiyun
2808*4882a593Smuzhiyun if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2809*4882a593Smuzhiyun kfree(regs_local);
2810*4882a593Smuzhiyun return -EFAULT;
2811*4882a593Smuzhiyun }
2812*4882a593Smuzhiyun kfree(regs_local);
2813*4882a593Smuzhiyun printk("Board %d registers dumped\n", board);
2814*4882a593Smuzhiyun ia_cmds.status = 0;
2815*4882a593Smuzhiyun }
2816*4882a593Smuzhiyun break;
2817*4882a593Smuzhiyun case READ_REG:
2818*4882a593Smuzhiyun {
2819*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2820*4882a593Smuzhiyun desc_dbg(iadev);
2821*4882a593Smuzhiyun ia_cmds.status = 0;
2822*4882a593Smuzhiyun }
2823*4882a593Smuzhiyun break;
2824*4882a593Smuzhiyun case 0x6:
2825*4882a593Smuzhiyun {
2826*4882a593Smuzhiyun ia_cmds.status = 0;
2827*4882a593Smuzhiyun printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2828*4882a593Smuzhiyun printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun break;
2831*4882a593Smuzhiyun case 0x8:
2832*4882a593Smuzhiyun {
2833*4882a593Smuzhiyun struct k_sonet_stats *stats;
2834*4882a593Smuzhiyun stats = &PRIV(_ia_dev[board])->sonet_stats;
2835*4882a593Smuzhiyun printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2836*4882a593Smuzhiyun printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2837*4882a593Smuzhiyun printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2838*4882a593Smuzhiyun printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2839*4882a593Smuzhiyun printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2840*4882a593Smuzhiyun printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2841*4882a593Smuzhiyun printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2842*4882a593Smuzhiyun printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2843*4882a593Smuzhiyun printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2844*4882a593Smuzhiyun }
2845*4882a593Smuzhiyun ia_cmds.status = 0;
2846*4882a593Smuzhiyun break;
2847*4882a593Smuzhiyun case 0x9:
2848*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2849*4882a593Smuzhiyun for (i = 1; i <= iadev->num_rx_desc; i++)
2850*4882a593Smuzhiyun free_desc(_ia_dev[board], i);
2851*4882a593Smuzhiyun writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2852*4882a593Smuzhiyun iadev->reass_reg+REASS_MASK_REG);
2853*4882a593Smuzhiyun iadev->rxing = 1;
2854*4882a593Smuzhiyun
2855*4882a593Smuzhiyun ia_cmds.status = 0;
2856*4882a593Smuzhiyun break;
2857*4882a593Smuzhiyun
2858*4882a593Smuzhiyun case 0xb:
2859*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2860*4882a593Smuzhiyun ia_frontend_intr(iadev);
2861*4882a593Smuzhiyun break;
2862*4882a593Smuzhiyun case 0xa:
2863*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) return -EPERM;
2864*4882a593Smuzhiyun {
2865*4882a593Smuzhiyun ia_cmds.status = 0;
2866*4882a593Smuzhiyun IADebugFlag = ia_cmds.maddr;
2867*4882a593Smuzhiyun printk("New debug option loaded\n");
2868*4882a593Smuzhiyun }
2869*4882a593Smuzhiyun break;
2870*4882a593Smuzhiyun default:
2871*4882a593Smuzhiyun ia_cmds.status = 0;
2872*4882a593Smuzhiyun break;
2873*4882a593Smuzhiyun }
2874*4882a593Smuzhiyun }
2875*4882a593Smuzhiyun break;
2876*4882a593Smuzhiyun default:
2877*4882a593Smuzhiyun break;
2878*4882a593Smuzhiyun
2879*4882a593Smuzhiyun }
2880*4882a593Smuzhiyun return 0;
2881*4882a593Smuzhiyun }
2882*4882a593Smuzhiyun
2883*4882a593Smuzhiyun static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2884*4882a593Smuzhiyun IADEV *iadev;
2885*4882a593Smuzhiyun struct dle *wr_ptr;
2886*4882a593Smuzhiyun struct tx_buf_desc __iomem *buf_desc_ptr;
2887*4882a593Smuzhiyun int desc;
2888*4882a593Smuzhiyun int comp_code;
2889*4882a593Smuzhiyun int total_len;
2890*4882a593Smuzhiyun struct cpcs_trailer *trailer;
2891*4882a593Smuzhiyun struct ia_vcc *iavcc;
2892*4882a593Smuzhiyun
2893*4882a593Smuzhiyun iadev = INPH_IA_DEV(vcc->dev);
2894*4882a593Smuzhiyun iavcc = INPH_IA_VCC(vcc);
2895*4882a593Smuzhiyun if (!iavcc->txing) {
2896*4882a593Smuzhiyun printk("discard packet on closed VC\n");
2897*4882a593Smuzhiyun if (vcc->pop)
2898*4882a593Smuzhiyun vcc->pop(vcc, skb);
2899*4882a593Smuzhiyun else
2900*4882a593Smuzhiyun dev_kfree_skb_any(skb);
2901*4882a593Smuzhiyun return 0;
2902*4882a593Smuzhiyun }
2903*4882a593Smuzhiyun
2904*4882a593Smuzhiyun if (skb->len > iadev->tx_buf_sz - 8) {
2905*4882a593Smuzhiyun printk("Transmit size over tx buffer size\n");
2906*4882a593Smuzhiyun if (vcc->pop)
2907*4882a593Smuzhiyun vcc->pop(vcc, skb);
2908*4882a593Smuzhiyun else
2909*4882a593Smuzhiyun dev_kfree_skb_any(skb);
2910*4882a593Smuzhiyun return 0;
2911*4882a593Smuzhiyun }
2912*4882a593Smuzhiyun if ((unsigned long)skb->data & 3) {
2913*4882a593Smuzhiyun printk("Misaligned SKB\n");
2914*4882a593Smuzhiyun if (vcc->pop)
2915*4882a593Smuzhiyun vcc->pop(vcc, skb);
2916*4882a593Smuzhiyun else
2917*4882a593Smuzhiyun dev_kfree_skb_any(skb);
2918*4882a593Smuzhiyun return 0;
2919*4882a593Smuzhiyun }
2920*4882a593Smuzhiyun /* Get a descriptor number from our free descriptor queue
2921*4882a593Smuzhiyun We get the descr number from the TCQ now, since I am using
2922*4882a593Smuzhiyun the TCQ as a free buffer queue. Initially TCQ will be
2923*4882a593Smuzhiyun initialized with all the descriptors and is hence, full.
2924*4882a593Smuzhiyun */
2925*4882a593Smuzhiyun desc = get_desc (iadev, iavcc);
2926*4882a593Smuzhiyun if (desc == 0xffff)
2927*4882a593Smuzhiyun return 1;
2928*4882a593Smuzhiyun comp_code = desc >> 13;
2929*4882a593Smuzhiyun desc &= 0x1fff;
2930*4882a593Smuzhiyun
2931*4882a593Smuzhiyun if ((desc == 0) || (desc > iadev->num_tx_desc))
2932*4882a593Smuzhiyun {
2933*4882a593Smuzhiyun IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2934*4882a593Smuzhiyun atomic_inc(&vcc->stats->tx);
2935*4882a593Smuzhiyun if (vcc->pop)
2936*4882a593Smuzhiyun vcc->pop(vcc, skb);
2937*4882a593Smuzhiyun else
2938*4882a593Smuzhiyun dev_kfree_skb_any(skb);
2939*4882a593Smuzhiyun return 0; /* return SUCCESS */
2940*4882a593Smuzhiyun }
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun if (comp_code)
2943*4882a593Smuzhiyun {
2944*4882a593Smuzhiyun IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2945*4882a593Smuzhiyun desc, comp_code);)
2946*4882a593Smuzhiyun }
2947*4882a593Smuzhiyun
2948*4882a593Smuzhiyun /* remember the desc and vcc mapping */
2949*4882a593Smuzhiyun iavcc->vc_desc_cnt++;
2950*4882a593Smuzhiyun iadev->desc_tbl[desc-1].iavcc = iavcc;
2951*4882a593Smuzhiyun iadev->desc_tbl[desc-1].txskb = skb;
2952*4882a593Smuzhiyun IA_SKB_STATE(skb) = 0;
2953*4882a593Smuzhiyun
2954*4882a593Smuzhiyun iadev->ffL.tcq_rd += 2;
2955*4882a593Smuzhiyun if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2956*4882a593Smuzhiyun iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2957*4882a593Smuzhiyun writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2958*4882a593Smuzhiyun
2959*4882a593Smuzhiyun /* Put the descriptor number in the packet ready queue
2960*4882a593Smuzhiyun and put the updated write pointer in the DLE field
2961*4882a593Smuzhiyun */
2962*4882a593Smuzhiyun *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2963*4882a593Smuzhiyun
2964*4882a593Smuzhiyun iadev->ffL.prq_wr += 2;
2965*4882a593Smuzhiyun if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2966*4882a593Smuzhiyun iadev->ffL.prq_wr = iadev->ffL.prq_st;
2967*4882a593Smuzhiyun
2968*4882a593Smuzhiyun /* Figure out the exact length of the packet and padding required to
2969*4882a593Smuzhiyun make it aligned on a 48 byte boundary. */
2970*4882a593Smuzhiyun total_len = skb->len + sizeof(struct cpcs_trailer);
2971*4882a593Smuzhiyun total_len = ((total_len + 47) / 48) * 48;
2972*4882a593Smuzhiyun IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2973*4882a593Smuzhiyun
2974*4882a593Smuzhiyun /* Put the packet in a tx buffer */
2975*4882a593Smuzhiyun trailer = iadev->tx_buf[desc-1].cpcs;
2976*4882a593Smuzhiyun IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2977*4882a593Smuzhiyun skb, skb->data, skb->len, desc);)
2978*4882a593Smuzhiyun trailer->control = 0;
2979*4882a593Smuzhiyun /*big endian*/
2980*4882a593Smuzhiyun trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2981*4882a593Smuzhiyun trailer->crc32 = 0; /* not needed - dummy bytes */
2982*4882a593Smuzhiyun
2983*4882a593Smuzhiyun /* Display the packet */
2984*4882a593Smuzhiyun IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2985*4882a593Smuzhiyun skb->len, tcnter++);
2986*4882a593Smuzhiyun xdump(skb->data, skb->len, "TX: ");
2987*4882a593Smuzhiyun printk("\n");)
2988*4882a593Smuzhiyun
2989*4882a593Smuzhiyun /* Build the buffer descriptor */
2990*4882a593Smuzhiyun buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2991*4882a593Smuzhiyun buf_desc_ptr += desc; /* points to the corresponding entry */
2992*4882a593Smuzhiyun buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2993*4882a593Smuzhiyun /* Huh ? p.115 of users guide describes this as a read-only register */
2994*4882a593Smuzhiyun writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2995*4882a593Smuzhiyun buf_desc_ptr->vc_index = vcc->vci;
2996*4882a593Smuzhiyun buf_desc_ptr->bytes = total_len;
2997*4882a593Smuzhiyun
2998*4882a593Smuzhiyun if (vcc->qos.txtp.traffic_class == ATM_ABR)
2999*4882a593Smuzhiyun clear_lockup (vcc, iadev);
3000*4882a593Smuzhiyun
3001*4882a593Smuzhiyun /* Build the DLE structure */
3002*4882a593Smuzhiyun wr_ptr = iadev->tx_dle_q.write;
3003*4882a593Smuzhiyun memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3004*4882a593Smuzhiyun wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3005*4882a593Smuzhiyun skb->len, DMA_TO_DEVICE);
3006*4882a593Smuzhiyun wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3007*4882a593Smuzhiyun buf_desc_ptr->buf_start_lo;
3008*4882a593Smuzhiyun /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3009*4882a593Smuzhiyun wr_ptr->bytes = skb->len;
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3012*4882a593Smuzhiyun if ((wr_ptr->bytes >> 2) == 0xb)
3013*4882a593Smuzhiyun wr_ptr->bytes = 0x30;
3014*4882a593Smuzhiyun
3015*4882a593Smuzhiyun wr_ptr->mode = TX_DLE_PSI;
3016*4882a593Smuzhiyun wr_ptr->prq_wr_ptr_data = 0;
3017*4882a593Smuzhiyun
3018*4882a593Smuzhiyun /* end is not to be used for the DLE q */
3019*4882a593Smuzhiyun if (++wr_ptr == iadev->tx_dle_q.end)
3020*4882a593Smuzhiyun wr_ptr = iadev->tx_dle_q.start;
3021*4882a593Smuzhiyun
3022*4882a593Smuzhiyun /* Build trailer dle */
3023*4882a593Smuzhiyun wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3024*4882a593Smuzhiyun wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3025*4882a593Smuzhiyun buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3026*4882a593Smuzhiyun
3027*4882a593Smuzhiyun wr_ptr->bytes = sizeof(struct cpcs_trailer);
3028*4882a593Smuzhiyun wr_ptr->mode = DMA_INT_ENABLE;
3029*4882a593Smuzhiyun wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3030*4882a593Smuzhiyun
3031*4882a593Smuzhiyun /* end is not to be used for the DLE q */
3032*4882a593Smuzhiyun if (++wr_ptr == iadev->tx_dle_q.end)
3033*4882a593Smuzhiyun wr_ptr = iadev->tx_dle_q.start;
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun iadev->tx_dle_q.write = wr_ptr;
3036*4882a593Smuzhiyun ATM_DESC(skb) = vcc->vci;
3037*4882a593Smuzhiyun skb_queue_tail(&iadev->tx_dma_q, skb);
3038*4882a593Smuzhiyun
3039*4882a593Smuzhiyun atomic_inc(&vcc->stats->tx);
3040*4882a593Smuzhiyun iadev->tx_pkt_cnt++;
3041*4882a593Smuzhiyun /* Increment transaction counter */
3042*4882a593Smuzhiyun writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun #if 0
3045*4882a593Smuzhiyun /* add flow control logic */
3046*4882a593Smuzhiyun if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3047*4882a593Smuzhiyun if (iavcc->vc_desc_cnt > 10) {
3048*4882a593Smuzhiyun vcc->tx_quota = vcc->tx_quota * 3 / 4;
3049*4882a593Smuzhiyun printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3050*4882a593Smuzhiyun iavcc->flow_inc = -1;
3051*4882a593Smuzhiyun iavcc->saved_tx_quota = vcc->tx_quota;
3052*4882a593Smuzhiyun } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3053*4882a593Smuzhiyun // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3054*4882a593Smuzhiyun printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3055*4882a593Smuzhiyun iavcc->flow_inc = 0;
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun }
3058*4882a593Smuzhiyun #endif
3059*4882a593Smuzhiyun IF_TX(printk("ia send done\n");)
3060*4882a593Smuzhiyun return 0;
3061*4882a593Smuzhiyun }
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3064*4882a593Smuzhiyun {
3065*4882a593Smuzhiyun IADEV *iadev;
3066*4882a593Smuzhiyun unsigned long flags;
3067*4882a593Smuzhiyun
3068*4882a593Smuzhiyun iadev = INPH_IA_DEV(vcc->dev);
3069*4882a593Smuzhiyun if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3070*4882a593Smuzhiyun {
3071*4882a593Smuzhiyun if (!skb)
3072*4882a593Smuzhiyun printk(KERN_CRIT "null skb in ia_send\n");
3073*4882a593Smuzhiyun else dev_kfree_skb_any(skb);
3074*4882a593Smuzhiyun return -EINVAL;
3075*4882a593Smuzhiyun }
3076*4882a593Smuzhiyun spin_lock_irqsave(&iadev->tx_lock, flags);
3077*4882a593Smuzhiyun if (!test_bit(ATM_VF_READY,&vcc->flags)){
3078*4882a593Smuzhiyun dev_kfree_skb_any(skb);
3079*4882a593Smuzhiyun spin_unlock_irqrestore(&iadev->tx_lock, flags);
3080*4882a593Smuzhiyun return -EINVAL;
3081*4882a593Smuzhiyun }
3082*4882a593Smuzhiyun ATM_SKB(skb)->vcc = vcc;
3083*4882a593Smuzhiyun
3084*4882a593Smuzhiyun if (skb_peek(&iadev->tx_backlog)) {
3085*4882a593Smuzhiyun skb_queue_tail(&iadev->tx_backlog, skb);
3086*4882a593Smuzhiyun }
3087*4882a593Smuzhiyun else {
3088*4882a593Smuzhiyun if (ia_pkt_tx (vcc, skb)) {
3089*4882a593Smuzhiyun skb_queue_tail(&iadev->tx_backlog, skb);
3090*4882a593Smuzhiyun }
3091*4882a593Smuzhiyun }
3092*4882a593Smuzhiyun spin_unlock_irqrestore(&iadev->tx_lock, flags);
3093*4882a593Smuzhiyun return 0;
3094*4882a593Smuzhiyun
3095*4882a593Smuzhiyun }
3096*4882a593Smuzhiyun
3097*4882a593Smuzhiyun static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3098*4882a593Smuzhiyun {
3099*4882a593Smuzhiyun int left = *pos, n;
3100*4882a593Smuzhiyun char *tmpPtr;
3101*4882a593Smuzhiyun IADEV *iadev = INPH_IA_DEV(dev);
3102*4882a593Smuzhiyun if(!left--) {
3103*4882a593Smuzhiyun if (iadev->phy_type == FE_25MBIT_PHY) {
3104*4882a593Smuzhiyun n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3105*4882a593Smuzhiyun return n;
3106*4882a593Smuzhiyun }
3107*4882a593Smuzhiyun if (iadev->phy_type == FE_DS3_PHY)
3108*4882a593Smuzhiyun n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3109*4882a593Smuzhiyun else if (iadev->phy_type == FE_E3_PHY)
3110*4882a593Smuzhiyun n = sprintf(page, " Board Type : Iphase-ATM-E3");
3111*4882a593Smuzhiyun else if (iadev->phy_type == FE_UTP_OPTION)
3112*4882a593Smuzhiyun n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3113*4882a593Smuzhiyun else
3114*4882a593Smuzhiyun n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3115*4882a593Smuzhiyun tmpPtr = page + n;
3116*4882a593Smuzhiyun if (iadev->pci_map_size == 0x40000)
3117*4882a593Smuzhiyun n += sprintf(tmpPtr, "-1KVC-");
3118*4882a593Smuzhiyun else
3119*4882a593Smuzhiyun n += sprintf(tmpPtr, "-4KVC-");
3120*4882a593Smuzhiyun tmpPtr = page + n;
3121*4882a593Smuzhiyun if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3122*4882a593Smuzhiyun n += sprintf(tmpPtr, "1M \n");
3123*4882a593Smuzhiyun else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3124*4882a593Smuzhiyun n += sprintf(tmpPtr, "512K\n");
3125*4882a593Smuzhiyun else
3126*4882a593Smuzhiyun n += sprintf(tmpPtr, "128K\n");
3127*4882a593Smuzhiyun return n;
3128*4882a593Smuzhiyun }
3129*4882a593Smuzhiyun if (!left) {
3130*4882a593Smuzhiyun return sprintf(page, " Number of Tx Buffer: %u\n"
3131*4882a593Smuzhiyun " Size of Tx Buffer : %u\n"
3132*4882a593Smuzhiyun " Number of Rx Buffer: %u\n"
3133*4882a593Smuzhiyun " Size of Rx Buffer : %u\n"
3134*4882a593Smuzhiyun " Packets Received : %u\n"
3135*4882a593Smuzhiyun " Packets Transmitted: %u\n"
3136*4882a593Smuzhiyun " Cells Received : %u\n"
3137*4882a593Smuzhiyun " Cells Transmitted : %u\n"
3138*4882a593Smuzhiyun " Board Dropped Cells: %u\n"
3139*4882a593Smuzhiyun " Board Dropped Pkts : %u\n",
3140*4882a593Smuzhiyun iadev->num_tx_desc, iadev->tx_buf_sz,
3141*4882a593Smuzhiyun iadev->num_rx_desc, iadev->rx_buf_sz,
3142*4882a593Smuzhiyun iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3143*4882a593Smuzhiyun iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3144*4882a593Smuzhiyun iadev->drop_rxcell, iadev->drop_rxpkt);
3145*4882a593Smuzhiyun }
3146*4882a593Smuzhiyun return 0;
3147*4882a593Smuzhiyun }
3148*4882a593Smuzhiyun
3149*4882a593Smuzhiyun static const struct atmdev_ops ops = {
3150*4882a593Smuzhiyun .open = ia_open,
3151*4882a593Smuzhiyun .close = ia_close,
3152*4882a593Smuzhiyun .ioctl = ia_ioctl,
3153*4882a593Smuzhiyun .send = ia_send,
3154*4882a593Smuzhiyun .phy_put = ia_phy_put,
3155*4882a593Smuzhiyun .phy_get = ia_phy_get,
3156*4882a593Smuzhiyun .change_qos = ia_change_qos,
3157*4882a593Smuzhiyun .proc_read = ia_proc_read,
3158*4882a593Smuzhiyun .owner = THIS_MODULE,
3159*4882a593Smuzhiyun };
3160*4882a593Smuzhiyun
/*
 * PCI probe callback: bring up one Interphase adapter.
 *
 * Allocates the per-device state (IADEV), enables the PCI device,
 * registers a new ATM device with the core, records the instance in the
 * module-global ia_dev[]/_ia_dev[] tables, and finally initializes and
 * starts the hardware via ia_init()/ia_start().  Each failure point
 * unwinds everything acquired so far through the goto ladder at the
 * bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	/* kzalloc gives us a zeroed IADEV, so all counters/pointers start NULL/0 */
	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	/* Link the ATM device to our private state; INPH_IA_DEV() reads this back */
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/*
	 * Publish the instance in the module-global tables before ia_init()
	 * so the shared timer/interrupt paths can find it.
	 * NOTE(review): iadev_count is not bounds-checked against the size
	 * of ia_dev[] here — presumably the PCI core never probes more
	 * devices than the table holds; verify against the array definition.
	 */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		/* Hardware bring-up failed: retract the table entries first */
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	/* Push onto the singly-linked list of boards (head = ia_boards) */
	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3220*4882a593Smuzhiyun
/*
 * PCI remove callback: tear down one adapter.
 *
 * The order matters: PHY interrupts are masked and the PHY stopped
 * before the IRQ is released and the ATM device deregistered; only then
 * are the MMIO mapping, PCI device, and DMA buffers released, and the
 * IADEV freed last since every earlier step still dereferences it.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts: clear the loss-of-signal enable bit */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
		SUNI_RSOP_CIE);
	/* brief settle time so the write takes effect before stopping the PHY */
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/*
	 * NOTE(review): this assumes devices are removed in LIFO order so
	 * that slot iadev_count-1 is this device's slot — TODO confirm;
	 * out-of-order hot-unplug would clear the wrong table entry.
	 */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	/* iadev is unreachable now; safe to free */
	kfree(iadev);
}
3250*4882a593Smuzhiyun
/*
 * PCI IDs this driver binds to: Interphase devices 0x0008 and 0x0009,
 * any subsystem.  Exported via MODULE_DEVICE_TABLE for hotplug/modalias.
 */
static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3257*4882a593Smuzhiyun
/* PCI driver glue: ties the ID table above to the probe/remove callbacks. */
static struct pci_driver ia_driver = {
	.name = DEV_LABEL,
	.id_table = ia_pci_tbl,
	.probe = ia_init_one,
	.remove = ia_remove_one,
};
3264*4882a593Smuzhiyun
3265*4882a593Smuzhiyun static int __init ia_module_init(void)
3266*4882a593Smuzhiyun {
3267*4882a593Smuzhiyun int ret;
3268*4882a593Smuzhiyun
3269*4882a593Smuzhiyun ret = pci_register_driver(&ia_driver);
3270*4882a593Smuzhiyun if (ret >= 0) {
3271*4882a593Smuzhiyun ia_timer.expires = jiffies + 3*HZ;
3272*4882a593Smuzhiyun add_timer(&ia_timer);
3273*4882a593Smuzhiyun } else
3274*4882a593Smuzhiyun printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3275*4882a593Smuzhiyun return ret;
3276*4882a593Smuzhiyun }
3277*4882a593Smuzhiyun
/*
 * Module exit: unbind all devices, then stop the shared timer.
 *
 * pci_unregister_driver() runs ia_remove_one() for every bound adapter
 * first; del_timer_sync() then waits for any in-flight timer callback
 * to finish before the module text is freed.
 */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	del_timer_sync(&ia_timer);
}
3284*4882a593Smuzhiyun
/* Standard module entry/exit hookup. */
module_init(ia_module_init);
module_exit(ia_module_exit);
3287