// SPDX-License-Identifier: GPL-1.0+
/*
 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
 *
 * Device driver for Microgate SyncLink ISA and PCI
 * high speed multiprotocol serial adapters.
 *
 * written by Paul Fulghum for Microgate Corporation
 * paulkf@microgate.com
 *
 * Microgate and SyncLink are trademarks of Microgate Corporation
 *
 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
 *
 * Original release 01/11/99
 *
 * This driver is primarily intended for use in synchronous
 * HDLC mode. Asynchronous mode is also provided.
 *
 * When operating in synchronous mode, each call to mgsl_write()
 * contains exactly one complete HDLC frame. Calling mgsl_put_char
 * will start assembling an HDLC frame that will not be sent until
 * mgsl_flush_chars or mgsl_write is called.
 *
 * Synchronous receive data is reported as complete frames. To accomplish
 * this, the TTY flip buffer is bypassed (too small to hold largest
 * frame and may fragment frames) and the line discipline
 * receive entry point is called directly.
 *
 * This driver has been tested with a slightly modified ppp.c driver
 * for synchronous PPP.
 *
 * 2000/02/16
 * Added interface for syncppp.c driver (an alternate synchronous PPP
 * implementation that also supports Cisco HDLC). Each device instance
 * registers as a tty device AND a network device (if dosyncppp option
 * is set for the device). The functionality is determined by which
 * device interface is opened.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
52*4882a593Smuzhiyun
/*
 * Debug helper: on x86 BREAKPOINT() executes the int3 trap instruction,
 * on all other architectures it expands to an empty block.
 */
#if defined(__i386__)
#  define BREAKPOINT() asm("   int $3");
#else
#  define BREAKPOINT() { }
#endif

/* compile-time limits on the number of supported adapters */
#define MAX_ISA_DEVICES 10
#define MAX_PCI_DEVICES 10
#define MAX_TOTAL_DEVICES 20
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun #include <linux/module.h>
64*4882a593Smuzhiyun #include <linux/errno.h>
65*4882a593Smuzhiyun #include <linux/signal.h>
66*4882a593Smuzhiyun #include <linux/sched.h>
67*4882a593Smuzhiyun #include <linux/timer.h>
68*4882a593Smuzhiyun #include <linux/interrupt.h>
69*4882a593Smuzhiyun #include <linux/pci.h>
70*4882a593Smuzhiyun #include <linux/tty.h>
71*4882a593Smuzhiyun #include <linux/tty_flip.h>
72*4882a593Smuzhiyun #include <linux/serial.h>
73*4882a593Smuzhiyun #include <linux/major.h>
74*4882a593Smuzhiyun #include <linux/string.h>
75*4882a593Smuzhiyun #include <linux/fcntl.h>
76*4882a593Smuzhiyun #include <linux/ptrace.h>
77*4882a593Smuzhiyun #include <linux/ioport.h>
78*4882a593Smuzhiyun #include <linux/mm.h>
79*4882a593Smuzhiyun #include <linux/seq_file.h>
80*4882a593Smuzhiyun #include <linux/slab.h>
81*4882a593Smuzhiyun #include <linux/delay.h>
82*4882a593Smuzhiyun #include <linux/netdevice.h>
83*4882a593Smuzhiyun #include <linux/vmalloc.h>
84*4882a593Smuzhiyun #include <linux/init.h>
85*4882a593Smuzhiyun #include <linux/ioctl.h>
86*4882a593Smuzhiyun #include <linux/synclink.h>
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun #include <asm/io.h>
89*4882a593Smuzhiyun #include <asm/irq.h>
90*4882a593Smuzhiyun #include <asm/dma.h>
91*4882a593Smuzhiyun #include <linux/bitops.h>
92*4882a593Smuzhiyun #include <asm/types.h>
93*4882a593Smuzhiyun #include <linux/termios.h>
94*4882a593Smuzhiyun #include <linux/workqueue.h>
95*4882a593Smuzhiyun #include <linux/hdlc.h>
96*4882a593Smuzhiyun #include <linux/dma-mapping.h>
97*4882a593Smuzhiyun
/*
 * SYNCLINK_GENERIC_HDLC is 1 when generic HDLC support (CONFIG_HDLC) is
 * usable by this driver for the current build configuration, 0 otherwise.
 */
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif

/*
 * Legacy wrappers around the user-space access helpers. The COPY_* macros
 * collapse the "bytes not copied" result to 0 or -EFAULT in 'error'; the
 * GET/PUT macros store the helper's return value directly.
 */
#define GET_USER(error,value,addr) error = get_user(value,addr)
#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0

#include <linux/uaccess.h>

/* value loaded into the Receive Count Limit Register (RCLR) */
#define RCLRVALUE 0xffff
112*4882a593Smuzhiyun
/*
 * Default communication parameters applied to a device instance.
 * Positional initializer: order must match the MGSL_PARAMS layout
 * declared in <linux/synclink.h>.
 */
static MGSL_PARAMS default_params = {
	MGSL_MODE_HDLC,			/* unsigned long mode */
	0,				/* unsigned char loopback; */
	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
	0,				/* unsigned long clock_speed; */
	0xff,				/* unsigned char addr_filter; */
	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
	9600,				/* unsigned long data_rate; */
	8,				/* unsigned char data_bits; */
	1,				/* unsigned char stop_bits; */
	ASYNC_PARITY_NONE		/* unsigned char parity; */
};
128*4882a593Smuzhiyun
#define SHARED_MEM_ADDRESS_SIZE 0x40000	/* 256KB shared memory window (PCI) */
#define BUFFERLISTSIZE 4096		/* bytes reserved for the Rx/Tx DMA buffer lists */
#define DMABUFFERSIZE 4096		/* size of each DMA data buffer */
#define MAXRXFRAMES 7			/* max receive frames buffered at once */
133*4882a593Smuzhiyun
/*
 * DMA buffer list entry shared with the 16C32 DMA controller.
 * Layout must not be changed: the 'reserved' padding is required by the
 * 16C32 and the volatile fields may be updated outside normal program
 * flow while a DMA transfer is active.
 */
typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;		/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;		/* padding required by 16C32 */
	u32 link;		/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;		/* physical address of this buffer entry */
	dma_addr_t dma_addr;	/* DMA-mapping handle for the data buffer */
} DMABUFFERENTRY, *DMAPBUFFERENTRY;
146*4882a593Smuzhiyun
/*
 * The queue of BH actions to be performed.
 * Bit flags recording which bottom-half actions are pending.
 */
#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4

/*
 * Threshold used with the *_chkcount fields to shut off interrupts
 * for a floating input signal. NOTE(review): exact unit is "interrupts
 * observed in a burst" — confirm against the ISR logic.
 */
#define IO_PIN_SHUTDOWN_LIMIT 100
154*4882a593Smuzhiyun
/* counts of serial input-signal (modem status line) transitions */
struct _input_signal_events {
	int ri_up;	/* ring indicator asserted */
	int ri_down;	/* ring indicator deasserted */
	int dsr_up;	/* data set ready asserted */
	int dsr_down;	/* data set ready deasserted */
	int dcd_up;	/* data carrier detect asserted */
	int dcd_down;	/* data carrier detect deasserted */
	int cts_up;	/* clear to send asserted */
	int cts_down;	/* clear to send deasserted */
};
165*4882a593Smuzhiyun
/* transmit holding buffer definitions */
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
	int buffer_size;	/* number of bytes queued in this buffer */
	unsigned char * buffer;	/* frame data awaiting transmit DMA load */
};
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun
/*
 * Device instance data structure.
 * One mgsl_struct is allocated per adapter; instances are chained into
 * a driver-wide list through next_device.
 */

struct mgsl_struct {
	int			magic;		/* structure validity check value (MGSL_MAGIC) */
	struct tty_port		port;		/* common tty port state */
	int			line;		/* tty line (device index) */
	int			hw_version;	/* adapter hardware revision */

	struct mgsl_icount	icount;		/* interrupt/event counters */

	int			timeout;	/* transmit timeout; NOTE(review): presumably jiffies — confirm */
	int			x_char;		/* xon/xoff character */
	u16			read_status_mask;	/* receive status bits reported to the tty layer */
	u16			ignore_status_mask;	/* receive status bits silently discarded */
	unsigned char		*xmit_buf;	/* async-mode transmit circular buffer */
	int			xmit_head;	/* buffer insert index */
	int			xmit_tail;	/* buffer remove index */
	int			xmit_cnt;	/* bytes currently buffered */

	wait_queue_head_t	status_event_wait_q;	/* waiters for status events */
	wait_queue_head_t	event_wait_q;		/* waiters for general events */
	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct	*next_device;	/* device list link */

	spinlock_t		irq_spinlock;	/* spinlock for synchronizing with ISR */
	struct work_struct	task;		/* task structure for scheduling bh */

	u32			EventMask;	/* event trigger mask */
	u32			RecordedEvents;	/* pending events */

	u32			max_frame_size;	/* as set by device config */

	u32			pending_bh;	/* BH_* flags awaiting bottom-half service */

	bool			bh_running;	/* Protection from multiple */
	int			isr_overflow;
	bool			bh_requested;

	int			dcd_chkcount;	/* check counts to prevent */
	int			cts_chkcount;	/* too many IRQs if a signal */
	int			dsr_chkcount;	/* is floating */
	int			ri_chkcount;

	char			*buffer_list;	/* virtual address of Rx & Tx buffer lists */
	u32			buffer_list_phys;	/* physical address of buffer lists */
	dma_addr_t		buffer_list_dma_addr;	/* DMA-mapping handle for buffer lists */

	unsigned int		rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY		*rx_buffer_list;	/* list of receive buffer entries */
	unsigned int		current_rx_buffer;	/* next receive entry to service */

	int			num_tx_dma_buffers;	/* number of tx dma frames required */
	int			tx_dma_buffers_used;
	unsigned int		tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY		*tx_buffer_list;	/* list of transmit buffer entries */
	int			start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int			current_tx_buffer;	/* next tx dma buffer to be loaded */

	unsigned char		*intermediate_rxbuffer;	/* assembly area for received frames */

	int			num_tx_holding_buffers;	/* number of tx holding buffer allocated */
	int			get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int			put_tx_holding_index;	/* next tx holding buffer to store user request */
	int			tx_holding_count;	/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	bool			rx_enabled;	/* receiver active */
	bool			rx_overflow;	/* receive overrun occurred */
	bool			rx_rcc_underrun;	/* receive character count underrun occurred */

	bool			tx_enabled;	/* transmitter active */
	bool			tx_active;	/* transmit in progress */
	u32			idle_mode;	/* transmit idle pattern selection */

	u16			cmr_value;	/* cached Channel Mode Register value */
	u16			tcsr_value;	/* cached Transmit Command/status Register value */

	char			device_name[25];	/* device instance name */

	unsigned char		bus;		/* expansion bus number (zero based) */
	unsigned char		function;	/* PCI device number */

	unsigned int		io_base;	/* base I/O address of adapter */
	unsigned int		io_addr_size;	/* size of the I/O address range */
	bool			io_addr_requested;	/* true if I/O address requested */

	unsigned int		irq_level;	/* interrupt level */
	unsigned long		irq_flags;	/* flags passed when requesting the IRQ */
	bool			irq_requested;	/* true if IRQ requested */

	unsigned int		dma_level;	/* DMA channel */
	bool			dma_requested;	/* true if dma channel requested */

	u16			mbre_bit;	/* cached master bus request enable bit */
	u16			loopback_bits;	/* cached loopback control bits */
	u16			usc_idle_mode;	/* cached USC idle mode selection */

	MGSL_PARAMS		params;		/* communications parameters */

	unsigned char		serial_signals;	/* current serial signal states */

	bool			irq_occurred;	/* for diagnostics use */
	unsigned int		init_error;	/* Initialization startup error (DIAGS) */
	int			fDiagnosticsmode;	/* Driver in Diagnostic mode? (DIAGS) */

	u32			last_mem_alloc;	/* next free offset in shared memory */
	unsigned char		*memory_base;	/* shared memory address (PCI only) */
	u32			phys_memory_base;	/* physical address of shared memory */
	bool			shared_mem_requested;	/* true if shared memory region claimed */

	unsigned char		*lcr_base;	/* local config registers (PCI only) */
	u32			phys_lcr_base;	/* physical address of LCR region */
	u32			lcr_offset;	/* offset of LCRs within mapped page */
	bool			lcr_mem_requested;	/* true if LCR region claimed */

	u32			misc_ctrl_value;	/* cached misc control register value */
	char			*flag_buf;	/* per-char flag buffer for async receive */
	bool			drop_rts_on_tx_done;	/* deassert RTS when transmit completes */

	bool			loopmode_insert_requested;	/* HDLC loop mode: insert into loop requested */
	bool			loopmode_send_done_requested;	/* HDLC loop mode: send-done requested */

	struct	_input_signal_events	input_signal_events;	/* modem signal transition counts */

	/* generic HDLC device parts */
	int			netcount;	/* open count for the network interface */
	spinlock_t		netlock;	/* protects net/tty open coordination */

#if SYNCLINK_GENERIC_HDLC
	struct net_device	*netdev;	/* generic HDLC network device */
#endif
};
308*4882a593Smuzhiyun
/* value stored in mgsl_struct.magic for structure validation */
#define MGSL_MAGIC 0x5401

/*
 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 */
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif

/*
 * These macros define the offsets used in calculating the
 * I/O address of the specified USC registers.
 */

#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0			/* DMA command/address register */
#define CCAR SDPIN		/* channel command/address register */
/*
 * Fix: parenthesize the expansion. The original `DCPIN + SDPIN` would
 * mis-evaluate in any multiplicative/shift context (e.g. 2*DATAREG
 * expanded to 2*2 + 4 == 8 instead of 12).
 */
#define DATAREG (DCPIN + SDPIN)	/* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40
332*4882a593Smuzhiyun
/*
 * These macros define the register address (ordinal number)
 * used for writing address/value pairs to the USC.
 */

#define CMR	0x02	/* Channel mode Register */
#define CCSR	0x04	/* Channel Command/status Register */
#define CCR	0x06	/* Channel Control Register */
#define PSR	0x08	/* Port status Register */
#define PCR	0x0a	/* Port Control Register */
#define TMDR	0x0c	/* Test mode Data Register */
#define TMCR	0x0e	/* Test mode Control Register */
#define CMCR	0x10	/* Clock mode Control Register */
#define HCR	0x12	/* Hardware Configuration Register */
#define IVR	0x14	/* Interrupt Vector Register */
#define IOCR	0x16	/* Input/Output Control Register */
#define ICR	0x18	/* Interrupt Control Register */
#define DCCR	0x1a	/* Daisy Chain Control Register */
#define MISR	0x1c	/* Misc Interrupt status Register */
#define SICR	0x1e	/* status Interrupt Control Register */
#define RDR	0x20	/* Receive Data Register */
#define RMR	0x22	/* Receive mode Register */
#define RCSR	0x24	/* Receive Command/status Register */
#define RICR	0x26	/* Receive Interrupt Control Register */
#define RSR	0x28	/* Receive Sync Register */
#define RCLR	0x2a	/* Receive count Limit Register */
#define RCCR	0x2c	/* Receive Character count Register */
#define TC0R	0x2e	/* Time Constant 0 Register */
#define TDR	0x30	/* Transmit Data Register */
#define TMR	0x32	/* Transmit mode Register */
#define TCSR	0x34	/* Transmit Command/status Register */
#define TICR	0x36	/* Transmit Interrupt Control Register */
#define TSR	0x38	/* Transmit Sync Register */
#define TCLR	0x3a	/* Transmit count Limit Register */
#define TCCR	0x3c	/* Transmit Character count Register */
#define TC1R	0x3e	/* Time Constant 1 Register */


/*
 * MACRO DEFINITIONS FOR DMA REGISTERS
 */

#define DCR	0x06	/* DMA Control Register (shared) */
#define DACR	0x08	/* DMA Array count Register (shared) */
#define BDCR	0x12	/* Burst/Dwell Control Register (shared) */
#define DIVR	0x14	/* DMA Interrupt Vector Register (shared) */
#define DICR	0x18	/* DMA Interrupt Control Register (shared) */
#define CDIR	0x1a	/* Clear DMA Interrupt Register (shared) */
#define SDIR	0x1c	/* Set DMA Interrupt Register (shared) */

#define TDMR	0x02	/* Transmit DMA mode Register */
#define TDIAR	0x1e	/* Transmit DMA Interrupt Arm Register */
#define TBCR	0x2a	/* Transmit Byte count Register */
#define TARL	0x2c	/* Transmit Address Register (low) */
#define TARU	0x2e	/* Transmit Address Register (high) */
#define NTBCR	0x3a	/* Next Transmit Byte count Register */
#define NTARL	0x3c	/* Next Transmit Address Register (low) */
#define NTARU	0x3e	/* Next Transmit Address Register (high) */

#define RDMR	0x82	/* Receive DMA mode Register (non-shared) */
#define RDIAR	0x9e	/* Receive DMA Interrupt Arm Register */
#define RBCR	0xaa	/* Receive Byte count Register */
#define RARL	0xac	/* Receive Address Register (low) */
#define RARU	0xae	/* Receive Address Register (high) */
#define NRBCR	0xba	/* Next Receive Byte count Register */
#define NRARL	0xbc	/* Next Receive Address Register (low) */
#define NRARU	0xbe	/* Next Receive Address Register (high) */
401*4882a593Smuzhiyun
/*
 * MACRO DEFINITIONS FOR MODEM STATUS BITS
 * (bit positions within the serial_signals byte)
 */

#define MODEMSTATUS_DTR 0x80
#define MODEMSTATUS_DSR 0x40
#define MODEMSTATUS_RTS 0x20
#define MODEMSTATUS_CTS 0x10
#define MODEMSTATUS_RI  0x04
#define MODEMSTATUS_DCD 0x01


/*
 * Channel Command/Address Register (CCAR) Command Codes
 * (written to CCAR to trigger the named channel operation)
 */

#define RTCmd_Null			0x0000
#define RTCmd_ResetHighestIus		0x1000
#define RTCmd_TriggerChannelLoadDma	0x2000
#define RTCmd_TriggerRxDma		0x2800
#define RTCmd_TriggerTxDma		0x3000
#define RTCmd_TriggerRxAndTxDma		0x3800
#define RTCmd_PurgeRxFifo		0x4800
#define RTCmd_PurgeTxFifo		0x5000
#define RTCmd_PurgeRxAndTxFifo		0x5800
#define RTCmd_LoadRcc			0x6800
#define RTCmd_LoadTcc			0x7000
#define RTCmd_LoadRccAndTcc		0x7800
#define RTCmd_LoadTC0			0x8800
#define RTCmd_LoadTC1			0x9000
#define RTCmd_LoadTC0AndTC1		0x9800
#define RTCmd_SerialDataLSBFirst	0xa000
#define RTCmd_SerialDataMSBFirst	0xa800
#define RTCmd_SelectBigEndian		0xb000
#define RTCmd_SelectLittleEndian	0xb800
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun
/*
 * DMA Command/Address Register (DCAR) Command Codes
 * (written to DCAR to control the DMA channels)
 */

#define DmaCmd_Null			0x0000
#define DmaCmd_ResetTxChannel		0x1000
#define DmaCmd_ResetRxChannel		0x1200
#define DmaCmd_StartTxChannel		0x2000
#define DmaCmd_StartRxChannel		0x2200
#define DmaCmd_ContinueTxChannel	0x3000
#define DmaCmd_ContinueRxChannel	0x3200
#define DmaCmd_PauseTxChannel		0x4000
#define DmaCmd_PauseRxChannel		0x4200
#define DmaCmd_AbortTxChannel		0x5000
#define DmaCmd_AbortRxChannel		0x5200
#define DmaCmd_InitTxChannel		0x7000
#define DmaCmd_InitRxChannel		0x7200
#define DmaCmd_ResetHighestDmaIus	0x8000
#define DmaCmd_ResetAllChannels		0x9000
#define DmaCmd_StartAllChannels		0xa000
#define DmaCmd_ContinueAllChannels	0xb000
#define DmaCmd_PauseAllChannels		0xc000
#define DmaCmd_AbortAllChannels		0xd000
#define DmaCmd_InitAllChannels		0xf000

/* Transmit command codes (written to TCSR upper bits) */
#define TCmd_Null			0x0000
#define TCmd_ClearTxCRC			0x2000
#define TCmd_SelectTicrTtsaData		0x4000
#define TCmd_SelectTicrTxFifostatus	0x5000
#define TCmd_SelectTicrIntLevel		0x6000
#define TCmd_SelectTicrdma_level	0x7000
#define TCmd_SendFrame			0x8000
#define TCmd_SendAbort			0x9000
#define TCmd_EnableDleInsertion		0xc000
#define TCmd_DisableDleInsertion	0xd000
#define TCmd_ClearEofEom		0xe000
#define TCmd_SetEofEom			0xf000

/* Receive command codes (written to RCSR upper bits) */
#define RCmd_Null			0x0000
#define RCmd_ClearRxCRC			0x2000
#define RCmd_EnterHuntmode		0x3000
#define RCmd_SelectRicrRtsaData		0x4000
#define RCmd_SelectRicrRxFifostatus	0x5000
#define RCmd_SelectRicrIntLevel		0x6000
#define RCmd_SelectRicrdma_level	0x7000
/*
 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
 */

#define RECEIVE_STATUS		BIT5
#define RECEIVE_DATA		BIT4
#define TRANSMIT_STATUS		BIT3
#define TRANSMIT_DATA		BIT2
#define IO_PIN			BIT1
#define MISC			BIT0


/*
 * Receive status Bits in Receive Command/status Register RCSR
 *
 * Several names share a bit position (SHORT_FRAME/CODE_VIOLATION on BIT8,
 * BREAK/ABORT on BIT5, CRC/FRAMING on BIT3, ABORT/PARITY on BIT2).
 * NOTE(review): the meaning presumably depends on the current operating
 * mode (HDLC vs async) — confirm against the USC datasheet.
 */

#define RXSTATUS_SHORT_FRAME		BIT8
#define RXSTATUS_CODE_VIOLATION		BIT8
#define RXSTATUS_EXITED_HUNT		BIT7
#define RXSTATUS_IDLE_RECEIVED		BIT6
#define RXSTATUS_BREAK_RECEIVED		BIT5
#define RXSTATUS_ABORT_RECEIVED		BIT5
#define RXSTATUS_RXBOUND		BIT4
#define RXSTATUS_CRC_ERROR		BIT3
#define RXSTATUS_FRAMING_ERROR		BIT3
#define RXSTATUS_ABORT			BIT2
#define RXSTATUS_PARITY_ERROR		BIT2
#define RXSTATUS_OVERRUN		BIT1
#define RXSTATUS_DATA_AVAILABLE		BIT0
#define RXSTATUS_ALL			0x01f6
/* clear (unlatch) the given latched receive status bits */
#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )

/*
 * Values for setting transmit idle mode in
 * Transmit Control/status Register (TCSR)
 */
#define IDLEMODE_FLAGS			0x0000
#define IDLEMODE_ALT_ONE_ZERO		0x0100
#define IDLEMODE_ZERO			0x0200
#define IDLEMODE_ONE			0x0300
#define IDLEMODE_ALT_MARK_SPACE		0x0500
#define IDLEMODE_SPACE			0x0600
#define IDLEMODE_MARK			0x0700
#define IDLEMODE_MASK			0x0700

/*
 * IUSC revision identifiers
 */
#define	IUSC_SL1660			0x4d44
#define IUSC_PRE_SL1660			0x4553
535*4882a593Smuzhiyun
/*
 * Transmit status Bits in Transmit Command/status Register (TCSR)
 */

#define TCSR_PRESERVE			0x0F00	/* TCSR bits that must be preserved across writes */

#define TCSR_UNDERWAIT			BIT11
#define TXSTATUS_PREAMBLE_SENT		BIT7
#define TXSTATUS_IDLE_SENT		BIT6
#define TXSTATUS_ABORT_SENT		BIT5
#define TXSTATUS_EOF_SENT		BIT4
#define TXSTATUS_EOM_SENT		BIT4	/* same bit as EOF_SENT */
#define TXSTATUS_CRC_SENT		BIT3
#define TXSTATUS_ALL_SENT		BIT2
#define TXSTATUS_UNDERRUN		BIT1
#define TXSTATUS_FIFO_EMPTY		BIT0
#define TXSTATUS_ALL			0x00fa
/*
 * Clear (unlatch) the given latched transmit status bits while re-writing
 * the cached upper TCSR contents held in tcsr_value.
 */
#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
554*4882a593Smuzhiyun
/* Misc Interrupt status Register (MISR) bits: live signal states and latches */

#define MISCSTATUS_RXC_LATCHED		BIT15
#define MISCSTATUS_RXC			BIT14
#define MISCSTATUS_TXC_LATCHED		BIT13
#define MISCSTATUS_TXC			BIT12
#define MISCSTATUS_RI_LATCHED		BIT11
#define MISCSTATUS_RI			BIT10
#define MISCSTATUS_DSR_LATCHED		BIT9
#define MISCSTATUS_DSR			BIT8
#define MISCSTATUS_DCD_LATCHED		BIT7
#define MISCSTATUS_DCD			BIT6
#define MISCSTATUS_CTS_LATCHED		BIT5
#define MISCSTATUS_CTS			BIT4
#define MISCSTATUS_RCC_UNDERRUN		BIT3
#define MISCSTATUS_DPLL_NO_SYNC		BIT2
#define MISCSTATUS_BRG1_ZERO		BIT1
#define MISCSTATUS_BRG0_ZERO		BIT0

/* clear latched I/O-pin bits (0xaaa0 masks the *_LATCHED positions) */
#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
/* clear latched misc-status bits (low nibble of MISR) */
#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))

/* status Interrupt Control Register (SICR) bits: per-signal edge enables */

#define SICR_RXC_ACTIVE			BIT15
#define SICR_RXC_INACTIVE		BIT14
#define SICR_RXC			(BIT15|BIT14)
#define SICR_TXC_ACTIVE			BIT13
#define SICR_TXC_INACTIVE		BIT12
#define SICR_TXC			(BIT13|BIT12)
#define SICR_RI_ACTIVE			BIT11
#define SICR_RI_INACTIVE		BIT10
#define SICR_RI				(BIT11|BIT10)
#define SICR_DSR_ACTIVE			BIT9
#define SICR_DSR_INACTIVE		BIT8
#define SICR_DSR			(BIT9|BIT8)
#define SICR_DCD_ACTIVE			BIT7
#define SICR_DCD_INACTIVE		BIT6
#define SICR_DCD			(BIT7|BIT6)
#define SICR_CTS_ACTIVE			BIT5
#define SICR_CTS_INACTIVE		BIT4
#define SICR_CTS			(BIT5|BIT4)
#define SICR_RCC_UNDERFLOW		BIT3
#define SICR_DPLL_NO_SYNC		BIT2
#define SICR_BRG1_ZERO			BIT1
#define SICR_BRG0_ZERO			BIT0
598*4882a593Smuzhiyun
/*
 * Interrupt control helpers. The function-style prototypes below are
 * superseded by the same-named macro implementations that follow; the
 * macros read-modify-write the ICR via usc_InReg/usc_OutReg.
 */
void usc_DisableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );

#define usc_EnableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )

#define usc_DisableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )

#define usc_EnableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )

#define usc_DisableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )

/* clear pending interrupt bits via the Daisy Chain Control Register */
#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
618*4882a593Smuzhiyun
/*
 * Transmit status Bits in Transmit Control status Register (TCSR)
 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
 */

#define TXSTATUS_PREAMBLE_SENT BIT7
#define TXSTATUS_IDLE_SENT BIT6
#define TXSTATUS_ABORT_SENT BIT5
#define TXSTATUS_EOF BIT4
#define TXSTATUS_CRC_SENT BIT3
#define TXSTATUS_ALL_SENT BIT2
#define TXSTATUS_UNDERRUN BIT1
#define TXSTATUS_FIFO_EMPTY BIT0

/* DMA Interrupt Control Register (DICR) bit definitions */
#define DICR_MASTER BIT15
#define DICR_TRANSMIT BIT0
#define DICR_RECEIVE BIT1

/* Set or clear the selected enable bits in the DICR (read-modify-write). */
#define usc_EnableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )

#define usc_DisableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )

/* Set or clear the selected status interrupt enables in the SICR.
 * NOTE(review): "DisablestatusIrqs" (lower-case 's') is the historical
 * spelling of this macro; callers elsewhere use this exact name. */
#define usc_EnableStatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )

#define usc_DisablestatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )

/* Transmit status Bits in Transmit Control status Register (TCSR) */
/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */


/* Enable-mode codes written to the low two bits of TMR (transmit) or
 * RMR (receive). ENABLE_AUTO_CTS and ENABLE_AUTO_DCD intentionally
 * share value 3 — presumably the hardware interprets mode 3 per
 * direction (CTS for tx, DCD for rx); confirm against the USC manual. */
#define DISABLE_UNCONDITIONAL 0
#define DISABLE_END_OF_FRAME 1
#define ENABLE_UNCONDITIONAL 2
#define ENABLE_AUTO_CTS 3
#define ENABLE_AUTO_DCD 3
#define usc_EnableTransmitter(a,b) \
	usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
#define usc_EnableReceiver(a,b) \
	usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
662*4882a593Smuzhiyun
/* Low-level USC serial controller register access and commands. */
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );

static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
void usc_TCmd( struct mgsl_struct *info, u16 Cmd );

/* Macro implementations shadow the two prototypes above. TCmd writes
 * the command on top of the driver's cached tcsr_value. */
#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))

/* Pack two sync characters (s0 high byte, s1 low byte) into the TSR. */
#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))

static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
static void usc_start_receiver( struct mgsl_struct *info );
static void usc_stop_receiver( struct mgsl_struct *info );

static void usc_start_transmitter( struct mgsl_struct *info );
static void usc_stop_transmitter( struct mgsl_struct *info );
static void usc_set_txidle( struct mgsl_struct *info );
static void usc_load_txfifo( struct mgsl_struct *info );

static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_enable_loopback( struct mgsl_struct *info, int enable );

static void usc_get_serial_signals( struct mgsl_struct *info );
static void usc_set_serial_signals( struct mgsl_struct *info );

static void usc_reset( struct mgsl_struct *info );

static void usc_set_sync_mode( struct mgsl_struct *info );
static void usc_set_sdlc_mode( struct mgsl_struct *info );
static void usc_set_async_mode( struct mgsl_struct *info );
static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );

static void usc_loopback_frame( struct mgsl_struct *info );

static void mgsl_tx_timeout(struct timer_list *t);


/* SDLC loop-mode (ring) support helpers. */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
static void usc_loopmode_insert_request( struct mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );

static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);

#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif

/*
 * Defines a BUS descriptor value for the PCI adapter
 * local bus address ranges.
 */

#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly)  << 28) + \
((RdDly)  << 26) + \
((Nwdd)   << 20) + \
((Nwad)   << 15) + \
((Nxda)   << 13) + \
((Nrdd)   << 11) + \
((Nrad)   <<  6) )
735*4882a593Smuzhiyun
static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);

/*
 * Adapter diagnostic routines
 */
static bool mgsl_register_test( struct mgsl_struct *info );
static bool mgsl_irq_test( struct mgsl_struct *info );
static bool mgsl_dma_test( struct mgsl_struct *info );
static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );

/*
 * device and resource management routines
 */
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);

/*
 * DMA buffer manipulation functions.
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
static bool mgsl_get_rx_frame( struct mgsl_struct *info );
static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);

/*
 * DMA and Shared Memory buffer allocation and formatting
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
static void mgsl_free_dma_buffers(struct mgsl_struct *info);
static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);

/*
 * Bottom half interrupt handlers
 */
static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);

/*
 * Interrupt handler routines and dispatch table.
 */
static void mgsl_isr_null( struct mgsl_struct *info );
static void mgsl_isr_transmit_data( struct mgsl_struct *info );
static void mgsl_isr_receive_data( struct mgsl_struct *info );
static void mgsl_isr_receive_status( struct mgsl_struct *info );
static void mgsl_isr_transmit_status( struct mgsl_struct *info );
static void mgsl_isr_io_pin( struct mgsl_struct *info );
static void mgsl_isr_misc( struct mgsl_struct *info );
static void mgsl_isr_receive_dma( struct mgsl_struct *info );
static void mgsl_isr_transmit_dma( struct mgsl_struct *info );

typedef void (*isr_dispatch_func)(struct mgsl_struct *);

/* NOTE(review): presumably indexed by the USC's interrupt type code,
 * with entry 0 the "no interrupt" case — confirm against the ISR. */
static isr_dispatch_func UscIsrTable[7] =
{
	mgsl_isr_null,
	mgsl_isr_misc,
	mgsl_isr_io_pin,
	mgsl_isr_transmit_data,
	mgsl_isr_transmit_status,
	mgsl_isr_receive_data,
	mgsl_isr_receive_status
};

/*
 * ioctl call handlers
 */
static int tiocmget(struct tty_struct *tty);
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear);
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
	__user *user_icount);
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS  __user *user_params);
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS  __user *new_params);
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
static int mgsl_txenable(struct mgsl_struct * info, int enable);
static int mgsl_txabort(struct mgsl_struct * info);
static int mgsl_rxenable(struct mgsl_struct * info, int enable);
static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );
834*4882a593Smuzhiyun
/* set non-zero on successful registration with PCI subsystem */
static bool pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static bool break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];

/* Module load-time parameters (perm 0: not exposed in sysfs). The
 * _hw_ variants mark io/irq/dma as hardware resource parameters. */
module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);

static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

static int synclink_init_one (struct pci_dev *dev,
				     const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

/* PCI IDs of supported Microgate adapters; zero entry terminates. */
static const struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name = "synclink",
	.id_table = synclink_pci_tbl,
	.probe = synclink_init_one,
	.remove = synclink_remove_one,
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
909*4882a593Smuzhiyun
910*4882a593Smuzhiyun /*
911*4882a593Smuzhiyun * 1st function defined in .text section. Calling this function in
912*4882a593Smuzhiyun * init_module() followed by a breakpoint allows a remote debugger
913*4882a593Smuzhiyun * (gdb) to get the .text address for the add-symbol-file command.
914*4882a593Smuzhiyun * This allows remote debugging of dynamically loadable modules.
915*4882a593Smuzhiyun */
static void* mgsl_get_text_ptr(void)
{
	/* Returning the function's own address yields an address inside
	 * the module's .text section for the debugger to resolve. */
	return mgsl_get_text_ptr;
}
920*4882a593Smuzhiyun
mgsl_paranoia_check(struct mgsl_struct * info,char * name,const char * routine)921*4882a593Smuzhiyun static inline int mgsl_paranoia_check(struct mgsl_struct *info,
922*4882a593Smuzhiyun char *name, const char *routine)
923*4882a593Smuzhiyun {
924*4882a593Smuzhiyun #ifdef MGSL_PARANOIA_CHECK
925*4882a593Smuzhiyun static const char *badmagic =
926*4882a593Smuzhiyun "Warning: bad magic number for mgsl struct (%s) in %s\n";
927*4882a593Smuzhiyun static const char *badinfo =
928*4882a593Smuzhiyun "Warning: null mgsl_struct for (%s) in %s\n";
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun if (!info) {
931*4882a593Smuzhiyun printk(badinfo, name, routine);
932*4882a593Smuzhiyun return 1;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun if (info->magic != MGSL_MAGIC) {
935*4882a593Smuzhiyun printk(badmagic, name, routine);
936*4882a593Smuzhiyun return 1;
937*4882a593Smuzhiyun }
938*4882a593Smuzhiyun #else
939*4882a593Smuzhiyun if (!info)
940*4882a593Smuzhiyun return 1;
941*4882a593Smuzhiyun #endif
942*4882a593Smuzhiyun return 0;
943*4882a593Smuzhiyun }
944*4882a593Smuzhiyun
945*4882a593Smuzhiyun /*
946*4882a593Smuzhiyun * line discipline callback wrappers
947*4882a593Smuzhiyun *
948*4882a593Smuzhiyun * The wrappers maintain line discipline references
949*4882a593Smuzhiyun * while calling into the line discipline.
950*4882a593Smuzhiyun *
951*4882a593Smuzhiyun * ldisc_receive_buf - pass receive data to line discipline
952*4882a593Smuzhiyun */
953*4882a593Smuzhiyun
ldisc_receive_buf(struct tty_struct * tty,const __u8 * data,char * flags,int count)954*4882a593Smuzhiyun static void ldisc_receive_buf(struct tty_struct *tty,
955*4882a593Smuzhiyun const __u8 *data, char *flags, int count)
956*4882a593Smuzhiyun {
957*4882a593Smuzhiyun struct tty_ldisc *ld;
958*4882a593Smuzhiyun if (!tty)
959*4882a593Smuzhiyun return;
960*4882a593Smuzhiyun ld = tty_ldisc_ref(tty);
961*4882a593Smuzhiyun if (ld) {
962*4882a593Smuzhiyun if (ld->ops->receive_buf)
963*4882a593Smuzhiyun ld->ops->receive_buf(tty, data, flags, count);
964*4882a593Smuzhiyun tty_ldisc_deref(ld);
965*4882a593Smuzhiyun }
966*4882a593Smuzhiyun }
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun /* mgsl_stop() throttle (stop) transmitter
969*4882a593Smuzhiyun *
970*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
971*4882a593Smuzhiyun * Return Value: None
972*4882a593Smuzhiyun */
mgsl_stop(struct tty_struct * tty)973*4882a593Smuzhiyun static void mgsl_stop(struct tty_struct *tty)
974*4882a593Smuzhiyun {
975*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
976*4882a593Smuzhiyun unsigned long flags;
977*4882a593Smuzhiyun
978*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
979*4882a593Smuzhiyun return;
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
982*4882a593Smuzhiyun printk("mgsl_stop(%s)\n",info->device_name);
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
985*4882a593Smuzhiyun if (info->tx_enabled)
986*4882a593Smuzhiyun usc_stop_transmitter(info);
987*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun } /* end of mgsl_stop() */
990*4882a593Smuzhiyun
991*4882a593Smuzhiyun /* mgsl_start() release (start) transmitter
992*4882a593Smuzhiyun *
993*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
994*4882a593Smuzhiyun * Return Value: None
995*4882a593Smuzhiyun */
mgsl_start(struct tty_struct * tty)996*4882a593Smuzhiyun static void mgsl_start(struct tty_struct *tty)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
999*4882a593Smuzhiyun unsigned long flags;
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1002*4882a593Smuzhiyun return;
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
1005*4882a593Smuzhiyun printk("mgsl_start(%s)\n",info->device_name);
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
1008*4882a593Smuzhiyun if (!info->tx_enabled)
1009*4882a593Smuzhiyun usc_start_transmitter(info);
1010*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
1011*4882a593Smuzhiyun
1012*4882a593Smuzhiyun } /* end of mgsl_start() */
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun /*
1015*4882a593Smuzhiyun * Bottom half work queue access functions
1016*4882a593Smuzhiyun */
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun /* mgsl_bh_action() Return next bottom half action to perform.
1019*4882a593Smuzhiyun * Return Value: BH action code or 0 if nothing to do.
1020*4882a593Smuzhiyun */
mgsl_bh_action(struct mgsl_struct * info)1021*4882a593Smuzhiyun static int mgsl_bh_action(struct mgsl_struct *info)
1022*4882a593Smuzhiyun {
1023*4882a593Smuzhiyun unsigned long flags;
1024*4882a593Smuzhiyun int rc = 0;
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun if (info->pending_bh & BH_RECEIVE) {
1029*4882a593Smuzhiyun info->pending_bh &= ~BH_RECEIVE;
1030*4882a593Smuzhiyun rc = BH_RECEIVE;
1031*4882a593Smuzhiyun } else if (info->pending_bh & BH_TRANSMIT) {
1032*4882a593Smuzhiyun info->pending_bh &= ~BH_TRANSMIT;
1033*4882a593Smuzhiyun rc = BH_TRANSMIT;
1034*4882a593Smuzhiyun } else if (info->pending_bh & BH_STATUS) {
1035*4882a593Smuzhiyun info->pending_bh &= ~BH_STATUS;
1036*4882a593Smuzhiyun rc = BH_STATUS;
1037*4882a593Smuzhiyun }
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun if (!rc) {
1040*4882a593Smuzhiyun /* Mark BH routine as complete */
1041*4882a593Smuzhiyun info->bh_running = false;
1042*4882a593Smuzhiyun info->bh_requested = false;
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun
1045*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun return rc;
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun /*
1051*4882a593Smuzhiyun * Perform bottom half processing of work items queued by ISR.
1052*4882a593Smuzhiyun */
mgsl_bh_handler(struct work_struct * work)1053*4882a593Smuzhiyun static void mgsl_bh_handler(struct work_struct *work)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun struct mgsl_struct *info =
1056*4882a593Smuzhiyun container_of(work, struct mgsl_struct, task);
1057*4882a593Smuzhiyun int action;
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_BH )
1060*4882a593Smuzhiyun printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1061*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name);
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun info->bh_running = true;
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun while((action = mgsl_bh_action(info)) != 0) {
1066*4882a593Smuzhiyun
1067*4882a593Smuzhiyun /* Process work item */
1068*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_BH )
1069*4882a593Smuzhiyun printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1070*4882a593Smuzhiyun __FILE__,__LINE__,action);
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun switch (action) {
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun case BH_RECEIVE:
1075*4882a593Smuzhiyun mgsl_bh_receive(info);
1076*4882a593Smuzhiyun break;
1077*4882a593Smuzhiyun case BH_TRANSMIT:
1078*4882a593Smuzhiyun mgsl_bh_transmit(info);
1079*4882a593Smuzhiyun break;
1080*4882a593Smuzhiyun case BH_STATUS:
1081*4882a593Smuzhiyun mgsl_bh_status(info);
1082*4882a593Smuzhiyun break;
1083*4882a593Smuzhiyun default:
1084*4882a593Smuzhiyun /* unknown work item ID */
1085*4882a593Smuzhiyun printk("Unknown work item ID=%08X!\n", action);
1086*4882a593Smuzhiyun break;
1087*4882a593Smuzhiyun }
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_BH )
1091*4882a593Smuzhiyun printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1092*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name);
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun
mgsl_bh_receive(struct mgsl_struct * info)1095*4882a593Smuzhiyun static void mgsl_bh_receive(struct mgsl_struct *info)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun bool (*get_rx_frame)(struct mgsl_struct *info) =
1098*4882a593Smuzhiyun (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1099*4882a593Smuzhiyun
1100*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_BH )
1101*4882a593Smuzhiyun printk( "%s(%d):mgsl_bh_receive(%s)\n",
1102*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name);
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun do
1105*4882a593Smuzhiyun {
1106*4882a593Smuzhiyun if (info->rx_rcc_underrun) {
1107*4882a593Smuzhiyun unsigned long flags;
1108*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
1109*4882a593Smuzhiyun usc_start_receiver(info);
1110*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
1111*4882a593Smuzhiyun return;
1112*4882a593Smuzhiyun }
1113*4882a593Smuzhiyun } while(get_rx_frame(info));
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun
mgsl_bh_transmit(struct mgsl_struct * info)1116*4882a593Smuzhiyun static void mgsl_bh_transmit(struct mgsl_struct *info)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun struct tty_struct *tty = info->port.tty;
1119*4882a593Smuzhiyun unsigned long flags;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_BH )
1122*4882a593Smuzhiyun printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1123*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name);
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun if (tty)
1126*4882a593Smuzhiyun tty_wakeup(tty);
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun /* if transmitter idle and loopmode_send_done_requested
1129*4882a593Smuzhiyun * then start echoing RxD to TxD
1130*4882a593Smuzhiyun */
1131*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
1132*4882a593Smuzhiyun if ( !info->tx_active && info->loopmode_send_done_requested )
1133*4882a593Smuzhiyun usc_loopmode_send_done( info );
1134*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun
mgsl_bh_status(struct mgsl_struct * info)1137*4882a593Smuzhiyun static void mgsl_bh_status(struct mgsl_struct *info)
1138*4882a593Smuzhiyun {
1139*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_BH )
1140*4882a593Smuzhiyun printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1141*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name);
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyun info->ri_chkcount = 0;
1144*4882a593Smuzhiyun info->dsr_chkcount = 0;
1145*4882a593Smuzhiyun info->dcd_chkcount = 0;
1146*4882a593Smuzhiyun info->cts_chkcount = 0;
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun /* mgsl_isr_receive_status()
1150*4882a593Smuzhiyun *
1151*4882a593Smuzhiyun * Service a receive status interrupt. The type of status
1152*4882a593Smuzhiyun * interrupt is indicated by the state of the RCSR.
1153*4882a593Smuzhiyun * This is only used for HDLC mode.
1154*4882a593Smuzhiyun *
1155*4882a593Smuzhiyun * Arguments: info pointer to device instance data
1156*4882a593Smuzhiyun * Return Value: None
1157*4882a593Smuzhiyun */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	/* Latched receive status; individual bits identify which
	 * condition(s) raised this interrupt. */
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* An HDLC abort arrived while a loopmode insert request was
	 * pending and loopmode is active: consume the request. */
	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = false;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	/* Count hunt-exit / idle-received events and wake any process
	 * sleeping on event_wait_q. */
	if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	/* Receive overrun: count it and run the sync-mode recovery path. */
	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* Acknowledge the interrupt and unlatch only the bits handled
	 * in this pass. */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

} /* end of mgsl_isr_receive_status() */
1199*4882a593Smuzhiyun
/* mgsl_isr_transmit_status()
 *
 * 	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	/* reading TCSR returns the latched transmit status bits */
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* ack the interrupt and clear the latched status so the next
	 * transmit status event can be recognized */
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in	*/
		/* the DMA buffer				*/
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	/* update transmit statistics; a status with none of the
	 * expected bits set is counted as an underrun */
	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	/* transmit attempt is over: reset the software transmit state
	 * and cancel the transmit-timeout timer */
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		/* a previous request asked for RTS to be dropped once
		 * the transmission completed; do it now */
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = false;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		/* tty path: honor flow control, otherwise let the bottom
		 * half wake up any writers waiting for buffer space */
		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

} /* end of mgsl_isr_transmit_status() */
1268*4882a593Smuzhiyun
/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt. The type of
 * 	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	/* ack the interrupt and clear the latched pin-change bits */
	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters; each *_chkcount guards
		 * against an interrupt storm from a bouncing signal by
		 * disabling that signal's irq after
		 * IO_PIN_SHUTDOWN_LIMIT events */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			/* mirror carrier state to the generic HDLC netdev */
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		/* wake anyone blocked in the status/event ioctls */
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		/* carrier handling: DCD raise unblocks pending opens,
		 * DCD drop hangs up the tty (when carrier is monitored) */
		if (tty_port_check_carrier(&info->port) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		/* hardware (CTS) flow control: restart transmit when CTS
		 * rises, stop transmit when CTS drops */
		if (tty_port_cts_enabled(&info->port) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->port.tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					info->port.tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		/* disable further TXC irqs; this path exists only so the
		 * adapter self-test can observe irq_occurred */
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = true;
	}

} /* end of mgsl_isr_io_pin() */
1390*4882a593Smuzhiyun
/* mgsl_isr_transmit_data()
 *
 * 	Service a transmit data interrupt (async mode only).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
			__FILE__,__LINE__,info->xmit_cnt);

	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );

	/* flow control active: stop the transmitter instead of
	 * refilling the FIFO */
	if (info->port.tty->stopped || info->port.tty->hw_stopped) {
		usc_stop_transmitter(info);
		return;
	}

	/* refill the tx FIFO from the circular transmit buffer, or
	 * mark transmission finished when the buffer is empty */
	if ( info->xmit_cnt )
		usc_load_txfifo( info );
	else
		info->tx_active = false;

	/* once the buffer drains below the threshold, schedule the
	 * bottom half to wake up waiting writers */
	if (info->xmit_cnt < WAKEUP_CHARS)
		info->pending_bh |= BH_TRANSMIT;

} /* end of mgsl_isr_transmit_data() */
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun /* mgsl_isr_receive_data()
1422*4882a593Smuzhiyun *
1423*4882a593Smuzhiyun * Service a receive data interrupt. This occurs
1424*4882a593Smuzhiyun * when operating in asynchronous interrupt transfer mode.
1425*4882a593Smuzhiyun * The receive data FIFO is flushed to the receive data buffers.
1426*4882a593Smuzhiyun *
1427*4882a593Smuzhiyun * Arguments: info pointer to device instance data
1428*4882a593Smuzhiyun * Return Value: None
1429*4882a593Smuzhiyun */
mgsl_isr_receive_data(struct mgsl_struct * info)1430*4882a593Smuzhiyun static void mgsl_isr_receive_data( struct mgsl_struct *info )
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun int Fifocount;
1433*4882a593Smuzhiyun u16 status;
1434*4882a593Smuzhiyun int work = 0;
1435*4882a593Smuzhiyun unsigned char DataByte;
1436*4882a593Smuzhiyun struct mgsl_icount *icount = &info->icount;
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_ISR )
1439*4882a593Smuzhiyun printk("%s(%d):mgsl_isr_receive_data\n",
1440*4882a593Smuzhiyun __FILE__,__LINE__);
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun /* select FIFO status for RICR readback */
1445*4882a593Smuzhiyun usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun /* clear the Wordstatus bit so that status readback */
1448*4882a593Smuzhiyun /* only reflects the status of this byte */
1449*4882a593Smuzhiyun usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun /* flush the receive FIFO */
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1454*4882a593Smuzhiyun int flag;
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun /* read one byte from RxFIFO */
1457*4882a593Smuzhiyun outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1458*4882a593Smuzhiyun info->io_base + CCAR );
1459*4882a593Smuzhiyun DataByte = inb( info->io_base + CCAR );
1460*4882a593Smuzhiyun
1461*4882a593Smuzhiyun /* get the status of the received byte */
1462*4882a593Smuzhiyun status = usc_InReg(info, RCSR);
1463*4882a593Smuzhiyun if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1464*4882a593Smuzhiyun RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
1465*4882a593Smuzhiyun usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun icount->rx++;
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun flag = 0;
1470*4882a593Smuzhiyun if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1471*4882a593Smuzhiyun RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
1472*4882a593Smuzhiyun printk("rxerr=%04X\n",status);
1473*4882a593Smuzhiyun /* update error statistics */
1474*4882a593Smuzhiyun if ( status & RXSTATUS_BREAK_RECEIVED ) {
1475*4882a593Smuzhiyun status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
1476*4882a593Smuzhiyun icount->brk++;
1477*4882a593Smuzhiyun } else if (status & RXSTATUS_PARITY_ERROR)
1478*4882a593Smuzhiyun icount->parity++;
1479*4882a593Smuzhiyun else if (status & RXSTATUS_FRAMING_ERROR)
1480*4882a593Smuzhiyun icount->frame++;
1481*4882a593Smuzhiyun else if (status & RXSTATUS_OVERRUN) {
1482*4882a593Smuzhiyun /* must issue purge fifo cmd before */
1483*4882a593Smuzhiyun /* 16C32 accepts more receive chars */
1484*4882a593Smuzhiyun usc_RTCmd(info,RTCmd_PurgeRxFifo);
1485*4882a593Smuzhiyun icount->overrun++;
1486*4882a593Smuzhiyun }
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun /* discard char if tty control flags say so */
1489*4882a593Smuzhiyun if (status & info->ignore_status_mask)
1490*4882a593Smuzhiyun continue;
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun status &= info->read_status_mask;
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun if (status & RXSTATUS_BREAK_RECEIVED) {
1495*4882a593Smuzhiyun flag = TTY_BREAK;
1496*4882a593Smuzhiyun if (info->port.flags & ASYNC_SAK)
1497*4882a593Smuzhiyun do_SAK(info->port.tty);
1498*4882a593Smuzhiyun } else if (status & RXSTATUS_PARITY_ERROR)
1499*4882a593Smuzhiyun flag = TTY_PARITY;
1500*4882a593Smuzhiyun else if (status & RXSTATUS_FRAMING_ERROR)
1501*4882a593Smuzhiyun flag = TTY_FRAME;
1502*4882a593Smuzhiyun } /* end of if (error) */
1503*4882a593Smuzhiyun tty_insert_flip_char(&info->port, DataByte, flag);
1504*4882a593Smuzhiyun if (status & RXSTATUS_OVERRUN) {
1505*4882a593Smuzhiyun /* Overrun is special, since it's
1506*4882a593Smuzhiyun * reported immediately, and doesn't
1507*4882a593Smuzhiyun * affect the current character
1508*4882a593Smuzhiyun */
1509*4882a593Smuzhiyun work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun }
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_ISR ) {
1514*4882a593Smuzhiyun printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1515*4882a593Smuzhiyun __FILE__,__LINE__,icount->rx,icount->brk,
1516*4882a593Smuzhiyun icount->parity,icount->frame,icount->overrun);
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun if(work)
1520*4882a593Smuzhiyun tty_flip_buffer_push(&info->port);
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun
/* mgsl_isr_misc()
 *
 * 	Service a miscellaneous interrupt source.
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_misc( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_misc status=%04X\n",
			__FILE__,__LINE__,status);

	/* RCC underrun in HDLC mode: shut the receive side down
	 * completely; the bottom half restarts it (rx_rcc_underrun) */
	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
	    (info->params.mode == MGSL_MODE_HDLC)) {

		/* turn off receiver and rx DMA */
		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
		usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);

		/* schedule BH handler to restart receiver */
		info->pending_bh |= BH_RECEIVE;
		info->rx_rcc_underrun = true;
	}

	/* ack the interrupt and clear the latched misc status bits */
	usc_ClearIrqPendingBits( info, MISC );
	usc_UnlatchMiscstatusBits( info, status );

} /* end of mgsl_isr_misc() */
1557*4882a593Smuzhiyun
/* mgsl_isr_null()
 *
 * 	Services undefined interrupt vectors from the
 * 	USC. (hence this function SHOULD never be called)
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_null( struct mgsl_struct *info )
{
	/* intentionally empty: placeholder for unused slots in the
	 * USC interrupt dispatch table */
} /* end of mgsl_isr_null() */
1570*4882a593Smuzhiyun
1571*4882a593Smuzhiyun /* mgsl_isr_receive_dma()
1572*4882a593Smuzhiyun *
1573*4882a593Smuzhiyun * Service a receive DMA channel interrupt.
1574*4882a593Smuzhiyun * For this driver there are two sources of receive DMA interrupts
1575*4882a593Smuzhiyun * as identified in the Receive DMA mode Register (RDMR):
1576*4882a593Smuzhiyun *
1577*4882a593Smuzhiyun * BIT3 EOA/EOL End of List, all receive buffers in receive
1578*4882a593Smuzhiyun * buffer list have been filled (no more free buffers
1579*4882a593Smuzhiyun * available). The DMA controller has shut down.
1580*4882a593Smuzhiyun *
1581*4882a593Smuzhiyun * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1582*4882a593Smuzhiyun * DMA buffer is terminated in response to completion
1583*4882a593Smuzhiyun * of a good frame or a frame with errors. The status
1584*4882a593Smuzhiyun * of the frame is stored in the buffer entry in the
1585*4882a593Smuzhiyun * list of receive buffer entries.
1586*4882a593Smuzhiyun *
1587*4882a593Smuzhiyun * Arguments: info pointer to device instance data
1588*4882a593Smuzhiyun * Return Value: None
1589*4882a593Smuzhiyun */
mgsl_isr_receive_dma(struct mgsl_struct * info)1590*4882a593Smuzhiyun static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1591*4882a593Smuzhiyun {
1592*4882a593Smuzhiyun u16 status;
1593*4882a593Smuzhiyun
1594*4882a593Smuzhiyun /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1595*4882a593Smuzhiyun usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun /* Read the receive DMA status to identify interrupt type. */
1598*4882a593Smuzhiyun /* This also clears the status bits. */
1599*4882a593Smuzhiyun status = usc_InDmaReg( info, RDMR );
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_ISR )
1602*4882a593Smuzhiyun printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1603*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name,status);
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun info->pending_bh |= BH_RECEIVE;
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun if ( status & BIT3 ) {
1608*4882a593Smuzhiyun info->rx_overflow = true;
1609*4882a593Smuzhiyun info->icount.buf_overrun++;
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun } /* end of mgsl_isr_receive_dma() */
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun /* mgsl_isr_transmit_dma()
1615*4882a593Smuzhiyun *
1616*4882a593Smuzhiyun * This function services a transmit DMA channel interrupt.
1617*4882a593Smuzhiyun *
1618*4882a593Smuzhiyun * For this driver there is one source of transmit DMA interrupts
1619*4882a593Smuzhiyun * as identified in the Transmit DMA Mode Register (TDMR):
1620*4882a593Smuzhiyun *
1621*4882a593Smuzhiyun * BIT2 EOB End of Buffer. This interrupt occurs when a
1622*4882a593Smuzhiyun * transmit DMA buffer has been emptied.
1623*4882a593Smuzhiyun *
1624*4882a593Smuzhiyun * The driver maintains enough transmit DMA buffers to hold at least
1625*4882a593Smuzhiyun * one max frame size transmit frame. When operating in a buffered
1626*4882a593Smuzhiyun * transmit mode, there may be enough transmit DMA buffers to hold at
1627*4882a593Smuzhiyun * least two or more max frame size frames. On an EOB condition,
1628*4882a593Smuzhiyun * determine if there are any queued transmit buffers and copy into
1629*4882a593Smuzhiyun * transmit DMA buffers if we have room.
1630*4882a593Smuzhiyun *
1631*4882a593Smuzhiyun * Arguments: info pointer to device instance data
1632*4882a593Smuzhiyun * Return Value: None
1633*4882a593Smuzhiyun */
mgsl_isr_transmit_dma(struct mgsl_struct * info)1634*4882a593Smuzhiyun static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1635*4882a593Smuzhiyun {
1636*4882a593Smuzhiyun u16 status;
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1639*4882a593Smuzhiyun usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun /* Read the transmit DMA status to identify interrupt type. */
1642*4882a593Smuzhiyun /* This also clears the status bits. */
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun status = usc_InDmaReg( info, TDMR );
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_ISR )
1647*4882a593Smuzhiyun printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1648*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name,status);
1649*4882a593Smuzhiyun
1650*4882a593Smuzhiyun if ( status & BIT2 ) {
1651*4882a593Smuzhiyun --info->tx_dma_buffers_used;
1652*4882a593Smuzhiyun
1653*4882a593Smuzhiyun /* if there are transmit frames queued,
1654*4882a593Smuzhiyun * try to load the next one
1655*4882a593Smuzhiyun */
1656*4882a593Smuzhiyun if ( load_next_tx_holding_buffer(info) ) {
1657*4882a593Smuzhiyun /* if call returns non-zero value, we have
1658*4882a593Smuzhiyun * at least one free tx holding buffer
1659*4882a593Smuzhiyun */
1660*4882a593Smuzhiyun info->pending_bh |= BH_TRANSMIT;
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun } /* end of mgsl_isr_transmit_dma() */
1665*4882a593Smuzhiyun
/* mgsl_interrupt()
 *
 * 	Interrupt service routine entry point.
 *
 * Arguments:
 *
 * 	irq	interrupt number that caused interrupt
 * 	dev_id	device ID supplied during interrupt registration
 *
 * Return Value: IRQ_HANDLED
 */
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
	struct mgsl_struct *info = dev_id;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->irq_spinlock);

	/* loop until the hardware reports no pending serial or DMA
	 * interrupt vectors */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector: serial vectors index the
		 * UscIsrTable; DMA vectors are decoded from BIT10/BIT9 */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		/* bail out and mask everything if a handler flagged an
		 * interrupt overflow condition */
		if ( info->isr_overflow ) {
			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
				__FILE__, __LINE__, info->device_name, info->irq_level);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */
1738*4882a593Smuzhiyun
/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	/* already started; nothing to do */
	if (tty_port_initialized(&info->port))
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	/* start the session with fresh line statistics */
	memset(&info->icount, 0, sizeof(info->icount));

	/* tx_timer fires if a transmission fails to complete in time */
	timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
		/* adapter failed: mark the tty unusable and release
		 * everything claimed above */
  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
		mgsl_release_resources(info);
  		return retval;
  	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	tty_port_set_initialized(&info->port, 1);

	return 0;
} /* end of startup() */
1796*4882a593Smuzhiyun
/* shutdown()
 *
 * 	Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	/* nothing to do if the port was never started */
	if (!tty_port_initialized(&info->port))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	/* wait for a concurrently running timeout handler to finish */
	del_timer_sync(&info->tx_timer);

	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	/* quiesce the hardware with interrupts blocked */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
		TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop the modem control lines on hangup-on-close (HUPCL) or
	 * when no tty is attached */
	if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	tty_port_set_initialized(&info->port, 0);
} /* end of shutdown() */
1859*4882a593Smuzhiyun
/* mgsl_program_hw()
 *
 * Program the adapter hardware to match the current info->params
 * settings and restart the receiver if reception is enabled.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	/* all register accesses below are serialized against the ISR */
	spin_lock_irqsave(&info->irq_spinlock,flags);

	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	/* discard anything still queued in the circular transmit buffer */
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	/* netcount != 0 means the generic HDLC (netdev) interface is open,
	 * which always requires synchronous operation */
	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	/* reset the debounce counters used by the status IRQ handler */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	/* latch the current modem signal state */
	usc_get_serial_signals(info);

	/* NOTE(review): port.tty is only dereferenced when netcount == 0;
	 * callers on that path appear to guarantee a non-NULL tty — confirm */
	if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
1893*4882a593Smuzhiyun
1894*4882a593Smuzhiyun /* Reconfigure adapter based on new parameters
1895*4882a593Smuzhiyun */
mgsl_change_params(struct mgsl_struct * info)1896*4882a593Smuzhiyun static void mgsl_change_params(struct mgsl_struct *info)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun unsigned cflag;
1899*4882a593Smuzhiyun int bits_per_char;
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun if (!info->port.tty)
1902*4882a593Smuzhiyun return;
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
1905*4882a593Smuzhiyun printk("%s(%d):mgsl_change_params(%s)\n",
1906*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name );
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun cflag = info->port.tty->termios.c_cflag;
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun /* if B0 rate (hangup) specified then negate RTS and DTR */
1911*4882a593Smuzhiyun /* otherwise assert RTS and DTR */
1912*4882a593Smuzhiyun if (cflag & CBAUD)
1913*4882a593Smuzhiyun info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1914*4882a593Smuzhiyun else
1915*4882a593Smuzhiyun info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun /* byte size and parity */
1918*4882a593Smuzhiyun
1919*4882a593Smuzhiyun switch (cflag & CSIZE) {
1920*4882a593Smuzhiyun case CS5: info->params.data_bits = 5; break;
1921*4882a593Smuzhiyun case CS6: info->params.data_bits = 6; break;
1922*4882a593Smuzhiyun case CS7: info->params.data_bits = 7; break;
1923*4882a593Smuzhiyun case CS8: info->params.data_bits = 8; break;
1924*4882a593Smuzhiyun /* Never happens, but GCC is too dumb to figure it out */
1925*4882a593Smuzhiyun default: info->params.data_bits = 7; break;
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun
1928*4882a593Smuzhiyun if (cflag & CSTOPB)
1929*4882a593Smuzhiyun info->params.stop_bits = 2;
1930*4882a593Smuzhiyun else
1931*4882a593Smuzhiyun info->params.stop_bits = 1;
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun info->params.parity = ASYNC_PARITY_NONE;
1934*4882a593Smuzhiyun if (cflag & PARENB) {
1935*4882a593Smuzhiyun if (cflag & PARODD)
1936*4882a593Smuzhiyun info->params.parity = ASYNC_PARITY_ODD;
1937*4882a593Smuzhiyun else
1938*4882a593Smuzhiyun info->params.parity = ASYNC_PARITY_EVEN;
1939*4882a593Smuzhiyun #ifdef CMSPAR
1940*4882a593Smuzhiyun if (cflag & CMSPAR)
1941*4882a593Smuzhiyun info->params.parity = ASYNC_PARITY_SPACE;
1942*4882a593Smuzhiyun #endif
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun /* calculate number of jiffies to transmit a full
1946*4882a593Smuzhiyun * FIFO (32 bytes) at specified data rate
1947*4882a593Smuzhiyun */
1948*4882a593Smuzhiyun bits_per_char = info->params.data_bits +
1949*4882a593Smuzhiyun info->params.stop_bits + 1;
1950*4882a593Smuzhiyun
1951*4882a593Smuzhiyun /* if port data rate is set to 460800 or less then
1952*4882a593Smuzhiyun * allow tty settings to override, otherwise keep the
1953*4882a593Smuzhiyun * current data rate.
1954*4882a593Smuzhiyun */
1955*4882a593Smuzhiyun if (info->params.data_rate <= 460800)
1956*4882a593Smuzhiyun info->params.data_rate = tty_get_baud_rate(info->port.tty);
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun if ( info->params.data_rate ) {
1959*4882a593Smuzhiyun info->timeout = (32*HZ*bits_per_char) /
1960*4882a593Smuzhiyun info->params.data_rate;
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun info->timeout += HZ/50; /* Add .02 seconds of slop */
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
1965*4882a593Smuzhiyun tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
1966*4882a593Smuzhiyun
1967*4882a593Smuzhiyun /* process tty input control flags */
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun info->read_status_mask = RXSTATUS_OVERRUN;
1970*4882a593Smuzhiyun if (I_INPCK(info->port.tty))
1971*4882a593Smuzhiyun info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1972*4882a593Smuzhiyun if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1973*4882a593Smuzhiyun info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun if (I_IGNPAR(info->port.tty))
1976*4882a593Smuzhiyun info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1977*4882a593Smuzhiyun if (I_IGNBRK(info->port.tty)) {
1978*4882a593Smuzhiyun info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1979*4882a593Smuzhiyun /* If ignoring parity and break indicators, ignore
1980*4882a593Smuzhiyun * overruns too. (For real raw support).
1981*4882a593Smuzhiyun */
1982*4882a593Smuzhiyun if (I_IGNPAR(info->port.tty))
1983*4882a593Smuzhiyun info->ignore_status_mask |= RXSTATUS_OVERRUN;
1984*4882a593Smuzhiyun }
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun mgsl_program_hw(info);
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun } /* end of mgsl_change_params() */
1989*4882a593Smuzhiyun
1990*4882a593Smuzhiyun /* mgsl_put_char()
1991*4882a593Smuzhiyun *
1992*4882a593Smuzhiyun * Add a character to the transmit buffer.
1993*4882a593Smuzhiyun *
1994*4882a593Smuzhiyun * Arguments: tty pointer to tty information structure
1995*4882a593Smuzhiyun * ch character to add to transmit buffer
1996*4882a593Smuzhiyun *
1997*4882a593Smuzhiyun * Return Value: None
1998*4882a593Smuzhiyun */
mgsl_put_char(struct tty_struct * tty,unsigned char ch)1999*4882a593Smuzhiyun static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2000*4882a593Smuzhiyun {
2001*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2002*4882a593Smuzhiyun unsigned long flags;
2003*4882a593Smuzhiyun int ret = 0;
2004*4882a593Smuzhiyun
2005*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO) {
2006*4882a593Smuzhiyun printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2007*4882a593Smuzhiyun __FILE__, __LINE__, ch, info->device_name);
2008*4882a593Smuzhiyun }
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2011*4882a593Smuzhiyun return 0;
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyun if (!info->xmit_buf)
2014*4882a593Smuzhiyun return 0;
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock, flags);
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2019*4882a593Smuzhiyun if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2020*4882a593Smuzhiyun info->xmit_buf[info->xmit_head++] = ch;
2021*4882a593Smuzhiyun info->xmit_head &= SERIAL_XMIT_SIZE-1;
2022*4882a593Smuzhiyun info->xmit_cnt++;
2023*4882a593Smuzhiyun ret = 1;
2024*4882a593Smuzhiyun }
2025*4882a593Smuzhiyun }
2026*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock, flags);
2027*4882a593Smuzhiyun return ret;
2028*4882a593Smuzhiyun
2029*4882a593Smuzhiyun } /* end of mgsl_put_char() */
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun /* mgsl_flush_chars()
2032*4882a593Smuzhiyun *
2033*4882a593Smuzhiyun * Enable transmitter so remaining characters in the
2034*4882a593Smuzhiyun * transmit buffer are sent.
2035*4882a593Smuzhiyun *
2036*4882a593Smuzhiyun * Arguments: tty pointer to tty information structure
2037*4882a593Smuzhiyun * Return Value: None
2038*4882a593Smuzhiyun */
mgsl_flush_chars(struct tty_struct * tty)2039*4882a593Smuzhiyun static void mgsl_flush_chars(struct tty_struct *tty)
2040*4882a593Smuzhiyun {
2041*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2042*4882a593Smuzhiyun unsigned long flags;
2043*4882a593Smuzhiyun
2044*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
2045*4882a593Smuzhiyun printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2046*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2047*4882a593Smuzhiyun
2048*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2049*4882a593Smuzhiyun return;
2050*4882a593Smuzhiyun
2051*4882a593Smuzhiyun if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2052*4882a593Smuzhiyun !info->xmit_buf)
2053*4882a593Smuzhiyun return;
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
2056*4882a593Smuzhiyun printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2057*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name );
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun if (!info->tx_active) {
2062*4882a593Smuzhiyun if ( (info->params.mode == MGSL_MODE_HDLC ||
2063*4882a593Smuzhiyun info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2064*4882a593Smuzhiyun /* operating in synchronous (frame oriented) mode */
2065*4882a593Smuzhiyun /* copy data from circular xmit_buf to */
2066*4882a593Smuzhiyun /* transmit DMA buffer. */
2067*4882a593Smuzhiyun mgsl_load_tx_dma_buffer(info,
2068*4882a593Smuzhiyun info->xmit_buf,info->xmit_cnt);
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun usc_start_transmitter(info);
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun
2073*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun } /* end of mgsl_flush_chars() */
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyun /* mgsl_write()
2078*4882a593Smuzhiyun *
2079*4882a593Smuzhiyun * Send a block of data
2080*4882a593Smuzhiyun *
2081*4882a593Smuzhiyun * Arguments:
2082*4882a593Smuzhiyun *
2083*4882a593Smuzhiyun * tty pointer to tty information structure
2084*4882a593Smuzhiyun * buf pointer to buffer containing send data
2085*4882a593Smuzhiyun * count size of send data in bytes
2086*4882a593Smuzhiyun *
2087*4882a593Smuzhiyun * Return Value: number of characters written
2088*4882a593Smuzhiyun */
static int mgsl_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	int c, ret = 0;
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) count=%d\n",
			__FILE__,__LINE__,info->device_name,count);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
		goto cleanup;

	if (!info->xmit_buf)
		goto cleanup;

	if ( info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if (info->tx_active) {

			if ( info->params.mode == MGSL_MODE_HDLC ) {
				/* HDLC: one frame in flight at a time; reject
				 * new data until the current frame completes */
				ret = 0;
				goto cleanup;
			}
			/* transmitter is actively sending data -
			 * if we have multiple transmit dma and
			 * holding buffers, attempt to queue this
			 * frame for transmission at a later time.
			 */
			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
				/* no tx holding buffers available */
				ret = 0;
				goto cleanup;
			}

			/* queue transmit frame request */
			ret = count;
			save_tx_buffer_request(info,buf,count);

			/* if we have sufficient tx dma buffers,
			 * load the next buffered tx request
			 */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			load_next_tx_holding_buffer(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			goto cleanup;
		}

		/* if operating in HDLC LoopMode and the adapter */
		/* has yet to be inserted into the loop, we can't */
		/* transmit */

		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
			!usc_loopmode_active(info) )
		{
			ret = 0;
			goto cleanup;
		}

		if ( info->xmit_cnt ) {
			/* Send accumulated from send_char() calls */
			/* as frame and wait before accepting more data. */
			/* note: ret stays 0, so the caller retries count */
			ret = 0;

			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
					__FILE__,__LINE__,info->device_name);
		} else {
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
					__FILE__,__LINE__,info->device_name);
			/* accept the whole buffer as one frame */
			ret = count;
			info->xmit_cnt = count;
			mgsl_load_tx_dma_buffer(info,buf,count);
		}
	} else {
		/* asynchronous mode: copy into the circular transmit
		 * buffer in at most two chunks (wrap at buffer end) */
		while (1) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			/* chunk limited by free space and distance to wrap */
			c = min_t(int, count,
				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
				    SERIAL_XMIT_SIZE - info->xmit_head));
			if (c <= 0) {
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
				break;
			}
			memcpy(info->xmit_buf + info->xmit_head, buf, c);
			info->xmit_head = ((info->xmit_head + c) &
					   (SERIAL_XMIT_SIZE-1));
			info->xmit_cnt += c;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			buf += c;
			count -= c;
			ret += c;
		}
	}

	/* kick the transmitter if data is pending and flow is not stopped */
	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_active)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
cleanup:
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);

	return ret;

} /* end of mgsl_write() */
2205*4882a593Smuzhiyun
2206*4882a593Smuzhiyun /* mgsl_write_room()
2207*4882a593Smuzhiyun *
2208*4882a593Smuzhiyun * Return the count of free bytes in transmit buffer
2209*4882a593Smuzhiyun *
2210*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
2211*4882a593Smuzhiyun * Return Value: None
2212*4882a593Smuzhiyun */
mgsl_write_room(struct tty_struct * tty)2213*4882a593Smuzhiyun static int mgsl_write_room(struct tty_struct *tty)
2214*4882a593Smuzhiyun {
2215*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2216*4882a593Smuzhiyun int ret;
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2219*4882a593Smuzhiyun return 0;
2220*4882a593Smuzhiyun ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2221*4882a593Smuzhiyun if (ret < 0)
2222*4882a593Smuzhiyun ret = 0;
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2225*4882a593Smuzhiyun printk("%s(%d):mgsl_write_room(%s)=%d\n",
2226*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name,ret );
2227*4882a593Smuzhiyun
2228*4882a593Smuzhiyun if ( info->params.mode == MGSL_MODE_HDLC ||
2229*4882a593Smuzhiyun info->params.mode == MGSL_MODE_RAW ) {
2230*4882a593Smuzhiyun /* operating in synchronous (frame oriented) mode */
2231*4882a593Smuzhiyun if ( info->tx_active )
2232*4882a593Smuzhiyun return 0;
2233*4882a593Smuzhiyun else
2234*4882a593Smuzhiyun return HDLC_MAX_FRAME_SIZE;
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun return ret;
2238*4882a593Smuzhiyun
2239*4882a593Smuzhiyun } /* end of mgsl_write_room() */
2240*4882a593Smuzhiyun
2241*4882a593Smuzhiyun /* mgsl_chars_in_buffer()
2242*4882a593Smuzhiyun *
2243*4882a593Smuzhiyun * Return the count of bytes in transmit buffer
2244*4882a593Smuzhiyun *
2245*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
2246*4882a593Smuzhiyun * Return Value: None
2247*4882a593Smuzhiyun */
mgsl_chars_in_buffer(struct tty_struct * tty)2248*4882a593Smuzhiyun static int mgsl_chars_in_buffer(struct tty_struct *tty)
2249*4882a593Smuzhiyun {
2250*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2253*4882a593Smuzhiyun printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2254*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name );
2255*4882a593Smuzhiyun
2256*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2257*4882a593Smuzhiyun return 0;
2258*4882a593Smuzhiyun
2259*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2260*4882a593Smuzhiyun printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2261*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun if ( info->params.mode == MGSL_MODE_HDLC ||
2264*4882a593Smuzhiyun info->params.mode == MGSL_MODE_RAW ) {
2265*4882a593Smuzhiyun /* operating in synchronous (frame oriented) mode */
2266*4882a593Smuzhiyun if ( info->tx_active )
2267*4882a593Smuzhiyun return info->max_frame_size;
2268*4882a593Smuzhiyun else
2269*4882a593Smuzhiyun return 0;
2270*4882a593Smuzhiyun }
2271*4882a593Smuzhiyun
2272*4882a593Smuzhiyun return info->xmit_cnt;
2273*4882a593Smuzhiyun } /* end of mgsl_chars_in_buffer() */
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun /* mgsl_flush_buffer()
2276*4882a593Smuzhiyun *
2277*4882a593Smuzhiyun * Discard all data in the send buffer
2278*4882a593Smuzhiyun *
2279*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
2280*4882a593Smuzhiyun * Return Value: None
2281*4882a593Smuzhiyun */
mgsl_flush_buffer(struct tty_struct * tty)2282*4882a593Smuzhiyun static void mgsl_flush_buffer(struct tty_struct *tty)
2283*4882a593Smuzhiyun {
2284*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2285*4882a593Smuzhiyun unsigned long flags;
2286*4882a593Smuzhiyun
2287*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2288*4882a593Smuzhiyun printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2289*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name );
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2292*4882a593Smuzhiyun return;
2293*4882a593Smuzhiyun
2294*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2295*4882a593Smuzhiyun info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2296*4882a593Smuzhiyun del_timer(&info->tx_timer);
2297*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2298*4882a593Smuzhiyun
2299*4882a593Smuzhiyun tty_wakeup(tty);
2300*4882a593Smuzhiyun }
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun /* mgsl_send_xchar()
2303*4882a593Smuzhiyun *
2304*4882a593Smuzhiyun * Send a high-priority XON/XOFF character
2305*4882a593Smuzhiyun *
2306*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
2307*4882a593Smuzhiyun * ch character to send
2308*4882a593Smuzhiyun * Return Value: None
2309*4882a593Smuzhiyun */
mgsl_send_xchar(struct tty_struct * tty,char ch)2310*4882a593Smuzhiyun static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2311*4882a593Smuzhiyun {
2312*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2313*4882a593Smuzhiyun unsigned long flags;
2314*4882a593Smuzhiyun
2315*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2316*4882a593Smuzhiyun printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2317*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name, ch );
2318*4882a593Smuzhiyun
2319*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2320*4882a593Smuzhiyun return;
2321*4882a593Smuzhiyun
2322*4882a593Smuzhiyun info->x_char = ch;
2323*4882a593Smuzhiyun if (ch) {
2324*4882a593Smuzhiyun /* Make sure transmit interrupts are on */
2325*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2326*4882a593Smuzhiyun if (!info->tx_enabled)
2327*4882a593Smuzhiyun usc_start_transmitter(info);
2328*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2329*4882a593Smuzhiyun }
2330*4882a593Smuzhiyun } /* end of mgsl_send_xchar() */
2331*4882a593Smuzhiyun
2332*4882a593Smuzhiyun /* mgsl_throttle()
2333*4882a593Smuzhiyun *
2334*4882a593Smuzhiyun * Signal remote device to throttle send data (our receive data)
2335*4882a593Smuzhiyun *
2336*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
2337*4882a593Smuzhiyun * Return Value: None
2338*4882a593Smuzhiyun */
mgsl_throttle(struct tty_struct * tty)2339*4882a593Smuzhiyun static void mgsl_throttle(struct tty_struct * tty)
2340*4882a593Smuzhiyun {
2341*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2342*4882a593Smuzhiyun unsigned long flags;
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2345*4882a593Smuzhiyun printk("%s(%d):mgsl_throttle(%s) entry\n",
2346*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name );
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2349*4882a593Smuzhiyun return;
2350*4882a593Smuzhiyun
2351*4882a593Smuzhiyun if (I_IXOFF(tty))
2352*4882a593Smuzhiyun mgsl_send_xchar(tty, STOP_CHAR(tty));
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun if (C_CRTSCTS(tty)) {
2355*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2356*4882a593Smuzhiyun info->serial_signals &= ~SerialSignal_RTS;
2357*4882a593Smuzhiyun usc_set_serial_signals(info);
2358*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2359*4882a593Smuzhiyun }
2360*4882a593Smuzhiyun } /* end of mgsl_throttle() */
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun /* mgsl_unthrottle()
2363*4882a593Smuzhiyun *
2364*4882a593Smuzhiyun * Signal remote device to stop throttling send data (our receive data)
2365*4882a593Smuzhiyun *
2366*4882a593Smuzhiyun * Arguments: tty pointer to tty info structure
2367*4882a593Smuzhiyun * Return Value: None
2368*4882a593Smuzhiyun */
mgsl_unthrottle(struct tty_struct * tty)2369*4882a593Smuzhiyun static void mgsl_unthrottle(struct tty_struct * tty)
2370*4882a593Smuzhiyun {
2371*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2372*4882a593Smuzhiyun unsigned long flags;
2373*4882a593Smuzhiyun
2374*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2375*4882a593Smuzhiyun printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2376*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name );
2377*4882a593Smuzhiyun
2378*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2379*4882a593Smuzhiyun return;
2380*4882a593Smuzhiyun
2381*4882a593Smuzhiyun if (I_IXOFF(tty)) {
2382*4882a593Smuzhiyun if (info->x_char)
2383*4882a593Smuzhiyun info->x_char = 0;
2384*4882a593Smuzhiyun else
2385*4882a593Smuzhiyun mgsl_send_xchar(tty, START_CHAR(tty));
2386*4882a593Smuzhiyun }
2387*4882a593Smuzhiyun
2388*4882a593Smuzhiyun if (C_CRTSCTS(tty)) {
2389*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2390*4882a593Smuzhiyun info->serial_signals |= SerialSignal_RTS;
2391*4882a593Smuzhiyun usc_set_serial_signals(info);
2392*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2393*4882a593Smuzhiyun }
2394*4882a593Smuzhiyun
2395*4882a593Smuzhiyun } /* end of mgsl_unthrottle() */
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun /* mgsl_get_stats()
2398*4882a593Smuzhiyun *
2399*4882a593Smuzhiyun * get the current serial parameters information
2400*4882a593Smuzhiyun *
2401*4882a593Smuzhiyun * Arguments: info pointer to device instance data
2402*4882a593Smuzhiyun * user_icount pointer to buffer to hold returned stats
2403*4882a593Smuzhiyun *
2404*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2405*4882a593Smuzhiyun */
/* mgsl_get_stats()
 *
 * Copy the accumulated statistics counters to a user buffer, or
 * reset the counters when no buffer is supplied.
 *
 * Arguments:	info		pointer to device instance data
 *		user_icount	pointer to buffer to hold returned stats,
 *				or NULL to clear the counters
 * Return Value: 0 if success, otherwise error code
 */
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
{
	int err;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_stats(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	if (!user_icount) {
		/* clear counters under the IRQ spinlock: the ISR updates
		 * info->icount, so an unlocked memset could race with it */
		spin_lock_irqsave(&info->irq_spinlock, flags);
		memset(&info->icount, 0, sizeof(info->icount));
		spin_unlock_irqrestore(&info->irq_spinlock, flags);
	} else {
		mutex_lock(&info->port.mutex);
		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
		mutex_unlock(&info->port.mutex);
		if (err)
			return -EFAULT;
	}

	return 0;

} /* end of mgsl_get_stats() */
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun /* mgsl_get_params()
2429*4882a593Smuzhiyun *
2430*4882a593Smuzhiyun * get the current serial parameters information
2431*4882a593Smuzhiyun *
2432*4882a593Smuzhiyun * Arguments: info pointer to device instance data
2433*4882a593Smuzhiyun * user_params pointer to buffer to hold returned params
2434*4882a593Smuzhiyun *
2435*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2436*4882a593Smuzhiyun */
/* mgsl_get_params()
 *
 * Copy the current serial parameters to a user buffer.
 *
 * Arguments:	info		pointer to device instance data
 *		user_params	pointer to buffer to hold returned params
 * Return Value: 0 if success, otherwise error code
 */
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	/* serialize against concurrent parameter changes */
	mutex_lock(&info->port.mutex);
	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
	mutex_unlock(&info->port.mutex);

	if (!err)
		return 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
			__FILE__,__LINE__,info->device_name);
	return -EFAULT;

} /* end of mgsl_get_params() */
2457*4882a593Smuzhiyun
2458*4882a593Smuzhiyun /* mgsl_set_params()
2459*4882a593Smuzhiyun *
2460*4882a593Smuzhiyun * set the serial parameters
2461*4882a593Smuzhiyun *
2462*4882a593Smuzhiyun * Arguments:
2463*4882a593Smuzhiyun *
2464*4882a593Smuzhiyun * info pointer to device instance data
2465*4882a593Smuzhiyun * new_params user buffer containing new serial params
2466*4882a593Smuzhiyun *
2467*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2468*4882a593Smuzhiyun */
/* mgsl_set_params()
 *
 * Install new serial parameters from a user buffer and reconfigure
 * the adapter accordingly.
 *
 * Arguments:	info		pointer to device instance data
 *		new_params	user buffer containing new serial params
 * Return Value: 0 if success, otherwise error code
 */
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
{
	unsigned long flags;
	MGSL_PARAMS tmp_params;
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
			info->device_name );

	/* snapshot the user buffer before touching driver state */
	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	mutex_lock(&info->port.mutex);
	/* install atomically with respect to the ISR */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->params = tmp_params;
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_change_params(info);
	mutex_unlock(&info->port.mutex);

	return 0;

} /* end of mgsl_set_params() */
2497*4882a593Smuzhiyun
2498*4882a593Smuzhiyun /* mgsl_get_txidle()
2499*4882a593Smuzhiyun *
2500*4882a593Smuzhiyun * get the current transmit idle mode
2501*4882a593Smuzhiyun *
2502*4882a593Smuzhiyun * Arguments: info pointer to device instance data
2503*4882a593Smuzhiyun * idle_mode pointer to buffer to hold returned idle mode
2504*4882a593Smuzhiyun *
2505*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2506*4882a593Smuzhiyun */
mgsl_get_txidle(struct mgsl_struct * info,int __user * idle_mode)2507*4882a593Smuzhiyun static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2508*4882a593Smuzhiyun {
2509*4882a593Smuzhiyun int err;
2510*4882a593Smuzhiyun
2511*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2512*4882a593Smuzhiyun printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2513*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name, info->idle_mode);
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2516*4882a593Smuzhiyun if (err) {
2517*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
2518*4882a593Smuzhiyun printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2519*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name);
2520*4882a593Smuzhiyun return -EFAULT;
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun
2523*4882a593Smuzhiyun return 0;
2524*4882a593Smuzhiyun
2525*4882a593Smuzhiyun } /* end of mgsl_get_txidle() */
2526*4882a593Smuzhiyun
2527*4882a593Smuzhiyun /* mgsl_set_txidle() service ioctl to set transmit idle mode
2528*4882a593Smuzhiyun *
2529*4882a593Smuzhiyun * Arguments: info pointer to device instance data
2530*4882a593Smuzhiyun * idle_mode new idle mode
2531*4882a593Smuzhiyun *
2532*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2533*4882a593Smuzhiyun */
mgsl_set_txidle(struct mgsl_struct * info,int idle_mode)2534*4882a593Smuzhiyun static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2535*4882a593Smuzhiyun {
2536*4882a593Smuzhiyun unsigned long flags;
2537*4882a593Smuzhiyun
2538*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2539*4882a593Smuzhiyun printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2540*4882a593Smuzhiyun info->device_name, idle_mode );
2541*4882a593Smuzhiyun
2542*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2543*4882a593Smuzhiyun info->idle_mode = idle_mode;
2544*4882a593Smuzhiyun usc_set_txidle( info );
2545*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2546*4882a593Smuzhiyun return 0;
2547*4882a593Smuzhiyun
2548*4882a593Smuzhiyun } /* end of mgsl_set_txidle() */
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun /* mgsl_txenable()
2551*4882a593Smuzhiyun *
2552*4882a593Smuzhiyun * enable or disable the transmitter
2553*4882a593Smuzhiyun *
2554*4882a593Smuzhiyun * Arguments:
2555*4882a593Smuzhiyun *
2556*4882a593Smuzhiyun * info pointer to device instance data
2557*4882a593Smuzhiyun * enable 1 = enable, 0 = disable
2558*4882a593Smuzhiyun *
2559*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2560*4882a593Smuzhiyun */
mgsl_txenable(struct mgsl_struct * info,int enable)2561*4882a593Smuzhiyun static int mgsl_txenable(struct mgsl_struct * info, int enable)
2562*4882a593Smuzhiyun {
2563*4882a593Smuzhiyun unsigned long flags;
2564*4882a593Smuzhiyun
2565*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2566*4882a593Smuzhiyun printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2567*4882a593Smuzhiyun info->device_name, enable);
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2570*4882a593Smuzhiyun if ( enable ) {
2571*4882a593Smuzhiyun if ( !info->tx_enabled ) {
2572*4882a593Smuzhiyun
2573*4882a593Smuzhiyun usc_start_transmitter(info);
2574*4882a593Smuzhiyun /*--------------------------------------------------
2575*4882a593Smuzhiyun * if HDLC/SDLC Loop mode, attempt to insert the
2576*4882a593Smuzhiyun * station in the 'loop' by setting CMR:13. Upon
2577*4882a593Smuzhiyun * receipt of the next GoAhead (RxAbort) sequence,
2578*4882a593Smuzhiyun * the OnLoop indicator (CCSR:7) should go active
2579*4882a593Smuzhiyun * to indicate that we are on the loop
2580*4882a593Smuzhiyun *--------------------------------------------------*/
2581*4882a593Smuzhiyun if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2582*4882a593Smuzhiyun usc_loopmode_insert_request( info );
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun } else {
2585*4882a593Smuzhiyun if ( info->tx_enabled )
2586*4882a593Smuzhiyun usc_stop_transmitter(info);
2587*4882a593Smuzhiyun }
2588*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2589*4882a593Smuzhiyun return 0;
2590*4882a593Smuzhiyun
2591*4882a593Smuzhiyun } /* end of mgsl_txenable() */
2592*4882a593Smuzhiyun
2593*4882a593Smuzhiyun /* mgsl_txabort() abort send HDLC frame
2594*4882a593Smuzhiyun *
2595*4882a593Smuzhiyun * Arguments: info pointer to device instance data
2596*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2597*4882a593Smuzhiyun */
mgsl_txabort(struct mgsl_struct * info)2598*4882a593Smuzhiyun static int mgsl_txabort(struct mgsl_struct * info)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun unsigned long flags;
2601*4882a593Smuzhiyun
2602*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2603*4882a593Smuzhiyun printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2604*4882a593Smuzhiyun info->device_name);
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2607*4882a593Smuzhiyun if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2608*4882a593Smuzhiyun {
2609*4882a593Smuzhiyun if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2610*4882a593Smuzhiyun usc_loopmode_cancel_transmit( info );
2611*4882a593Smuzhiyun else
2612*4882a593Smuzhiyun usc_TCmd(info,TCmd_SendAbort);
2613*4882a593Smuzhiyun }
2614*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2615*4882a593Smuzhiyun return 0;
2616*4882a593Smuzhiyun
2617*4882a593Smuzhiyun } /* end of mgsl_txabort() */
2618*4882a593Smuzhiyun
2619*4882a593Smuzhiyun /* mgsl_rxenable() enable or disable the receiver
2620*4882a593Smuzhiyun *
2621*4882a593Smuzhiyun * Arguments: info pointer to device instance data
2622*4882a593Smuzhiyun * enable 1 = enable, 0 = disable
2623*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2624*4882a593Smuzhiyun */
mgsl_rxenable(struct mgsl_struct * info,int enable)2625*4882a593Smuzhiyun static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2626*4882a593Smuzhiyun {
2627*4882a593Smuzhiyun unsigned long flags;
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2630*4882a593Smuzhiyun printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2631*4882a593Smuzhiyun info->device_name, enable);
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2634*4882a593Smuzhiyun if ( enable ) {
2635*4882a593Smuzhiyun if ( !info->rx_enabled )
2636*4882a593Smuzhiyun usc_start_receiver(info);
2637*4882a593Smuzhiyun } else {
2638*4882a593Smuzhiyun if ( info->rx_enabled )
2639*4882a593Smuzhiyun usc_stop_receiver(info);
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2642*4882a593Smuzhiyun return 0;
2643*4882a593Smuzhiyun
2644*4882a593Smuzhiyun } /* end of mgsl_rxenable() */
2645*4882a593Smuzhiyun
/* mgsl_wait_event()	wait for specified event to occur
 *
 * Sleeps until one of the serial-signal / receive-status events in the
 * caller's bitmask occurs, then writes the bitmask of triggered events
 * back through mask_ptr.  Returns immediately if the current signal
 * state already satisfies the mask.
 *
 * Arguments:	 	info	pointer to device instance data
 * 			mask_ptr pointer to bitmask of events to wait for
 * Return Value:	0 	if successful and bit mask updated with
 *			of events triggerred,
 * 			otherwise error code (-EFAULT on bad user pointer,
 *			-ERESTARTSYS if interrupted by a signal, -EIO if
 *			woken with no observable change)
 */
static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
{
	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct	_input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	/* fetch the event mask from user space */
	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
	if (rc) {
		return -EFAULT;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, mask);

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* return immediately if state matches requested events */
	usc_get_serial_signals(info);
	s = info->serial_signals;
	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
 		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
		goto exit;
	}

	/* save current irq counts */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		u16 oldreg = usc_InReg(info,RICR);
		u16 newreg = oldreg +
			 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
			 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
		if (oldreg != newreg)
			usc_OutReg(info, RICR, newreg);
	}

	/* queue ourselves BEFORE dropping the lock so a wakeup that
	 * fires between unlock and schedule() is not lost
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		/* re-arm for the next schedule() before releasing the lock */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up   == oldsigs.dcd_up   &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up   == oldsigs.cts_up   &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up    == oldsigs.ri_up    &&
		    newsigs.ri_down  == oldsigs.ri_down  &&
		    cnow.exithunt    == cprev.exithunt   &&
		    cnow.rxidle      == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		/* translate the observed transitions into event bits and
		 * keep only those the caller asked for
		 */
		events = mask &
			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);

	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			/* disable enable exit hunt mode/idle rcvd IRQs */
			usc_OutReg(info, RICR, usc_InReg(info,RICR) &
				~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
		}
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
exit:
	/* report the triggered events back to user space on success */
	if ( rc == 0 )
		PUT_USER(rc, events, mask_ptr);

	return rc;

} /* end of mgsl_wait_event() */
2775*4882a593Smuzhiyun
/* modem_input_wait()
 *
 * Block until one of the modem inputs selected by arg (bitmask of
 * TIOCM_RNG/TIOCM_DSR/TIOCM_CD/TIOCM_CTS) changes state, as observed
 * through the interrupt counters in info->icount.
 *
 * Return Value:	0 on a matching change, -ERESTARTSYS if interrupted
 *			by a signal, -EIO if woken with no counter change
 */
static int modem_input_wait(struct mgsl_struct *info,int arg)
{
	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts; queue ourselves and mark the task
	 * interruptible BEFORE dropping the lock so a wakeup between
	 * unlock and schedule() is not lost
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		/* re-arm for the next schedule() before releasing the lock */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}
2825*4882a593Smuzhiyun
2826*4882a593Smuzhiyun /* return the state of the serial control and status signals
2827*4882a593Smuzhiyun */
tiocmget(struct tty_struct * tty)2828*4882a593Smuzhiyun static int tiocmget(struct tty_struct *tty)
2829*4882a593Smuzhiyun {
2830*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2831*4882a593Smuzhiyun unsigned int result;
2832*4882a593Smuzhiyun unsigned long flags;
2833*4882a593Smuzhiyun
2834*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2835*4882a593Smuzhiyun usc_get_serial_signals(info);
2836*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2839*4882a593Smuzhiyun ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2840*4882a593Smuzhiyun ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2841*4882a593Smuzhiyun ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2842*4882a593Smuzhiyun ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2843*4882a593Smuzhiyun ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2844*4882a593Smuzhiyun
2845*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2846*4882a593Smuzhiyun printk("%s(%d):%s tiocmget() value=%08X\n",
2847*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name, result );
2848*4882a593Smuzhiyun return result;
2849*4882a593Smuzhiyun }
2850*4882a593Smuzhiyun
2851*4882a593Smuzhiyun /* set modem control signals (DTR/RTS)
2852*4882a593Smuzhiyun */
tiocmset(struct tty_struct * tty,unsigned int set,unsigned int clear)2853*4882a593Smuzhiyun static int tiocmset(struct tty_struct *tty,
2854*4882a593Smuzhiyun unsigned int set, unsigned int clear)
2855*4882a593Smuzhiyun {
2856*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
2857*4882a593Smuzhiyun unsigned long flags;
2858*4882a593Smuzhiyun
2859*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2860*4882a593Smuzhiyun printk("%s(%d):%s tiocmset(%x,%x)\n",
2861*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name, set, clear);
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun if (set & TIOCM_RTS)
2864*4882a593Smuzhiyun info->serial_signals |= SerialSignal_RTS;
2865*4882a593Smuzhiyun if (set & TIOCM_DTR)
2866*4882a593Smuzhiyun info->serial_signals |= SerialSignal_DTR;
2867*4882a593Smuzhiyun if (clear & TIOCM_RTS)
2868*4882a593Smuzhiyun info->serial_signals &= ~SerialSignal_RTS;
2869*4882a593Smuzhiyun if (clear & TIOCM_DTR)
2870*4882a593Smuzhiyun info->serial_signals &= ~SerialSignal_DTR;
2871*4882a593Smuzhiyun
2872*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2873*4882a593Smuzhiyun usc_set_serial_signals(info);
2874*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2875*4882a593Smuzhiyun
2876*4882a593Smuzhiyun return 0;
2877*4882a593Smuzhiyun }
2878*4882a593Smuzhiyun
2879*4882a593Smuzhiyun /* mgsl_break() Set or clear transmit break condition
2880*4882a593Smuzhiyun *
2881*4882a593Smuzhiyun * Arguments: tty pointer to tty instance data
2882*4882a593Smuzhiyun * break_state -1=set break condition, 0=clear
2883*4882a593Smuzhiyun * Return Value: error code
2884*4882a593Smuzhiyun */
mgsl_break(struct tty_struct * tty,int break_state)2885*4882a593Smuzhiyun static int mgsl_break(struct tty_struct *tty, int break_state)
2886*4882a593Smuzhiyun {
2887*4882a593Smuzhiyun struct mgsl_struct * info = tty->driver_data;
2888*4882a593Smuzhiyun unsigned long flags;
2889*4882a593Smuzhiyun
2890*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2891*4882a593Smuzhiyun printk("%s(%d):mgsl_break(%s,%d)\n",
2892*4882a593Smuzhiyun __FILE__,__LINE__, info->device_name, break_state);
2893*4882a593Smuzhiyun
2894*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2895*4882a593Smuzhiyun return -EINVAL;
2896*4882a593Smuzhiyun
2897*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2898*4882a593Smuzhiyun if (break_state == -1)
2899*4882a593Smuzhiyun usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2900*4882a593Smuzhiyun else
2901*4882a593Smuzhiyun usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2902*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2903*4882a593Smuzhiyun return 0;
2904*4882a593Smuzhiyun
2905*4882a593Smuzhiyun } /* end of mgsl_break() */
2906*4882a593Smuzhiyun
2907*4882a593Smuzhiyun /*
2908*4882a593Smuzhiyun * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2909*4882a593Smuzhiyun * Return: write counters to the user passed counter struct
2910*4882a593Smuzhiyun * NB: both 1->0 and 0->1 transitions are counted except for
2911*4882a593Smuzhiyun * RI where only 0->1 is counted.
2912*4882a593Smuzhiyun */
msgl_get_icount(struct tty_struct * tty,struct serial_icounter_struct * icount)2913*4882a593Smuzhiyun static int msgl_get_icount(struct tty_struct *tty,
2914*4882a593Smuzhiyun struct serial_icounter_struct *icount)
2915*4882a593Smuzhiyun
2916*4882a593Smuzhiyun {
2917*4882a593Smuzhiyun struct mgsl_struct * info = tty->driver_data;
2918*4882a593Smuzhiyun struct mgsl_icount cnow; /* kernel counter temps */
2919*4882a593Smuzhiyun unsigned long flags;
2920*4882a593Smuzhiyun
2921*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
2922*4882a593Smuzhiyun cnow = info->icount;
2923*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
2924*4882a593Smuzhiyun
2925*4882a593Smuzhiyun icount->cts = cnow.cts;
2926*4882a593Smuzhiyun icount->dsr = cnow.dsr;
2927*4882a593Smuzhiyun icount->rng = cnow.rng;
2928*4882a593Smuzhiyun icount->dcd = cnow.dcd;
2929*4882a593Smuzhiyun icount->rx = cnow.rx;
2930*4882a593Smuzhiyun icount->tx = cnow.tx;
2931*4882a593Smuzhiyun icount->frame = cnow.frame;
2932*4882a593Smuzhiyun icount->overrun = cnow.overrun;
2933*4882a593Smuzhiyun icount->parity = cnow.parity;
2934*4882a593Smuzhiyun icount->brk = cnow.brk;
2935*4882a593Smuzhiyun icount->buf_overrun = cnow.buf_overrun;
2936*4882a593Smuzhiyun return 0;
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun
2939*4882a593Smuzhiyun /* mgsl_ioctl() Service an IOCTL request
2940*4882a593Smuzhiyun *
2941*4882a593Smuzhiyun * Arguments:
2942*4882a593Smuzhiyun *
2943*4882a593Smuzhiyun * tty pointer to tty instance data
2944*4882a593Smuzhiyun * cmd IOCTL command code
2945*4882a593Smuzhiyun * arg command argument/context
2946*4882a593Smuzhiyun *
2947*4882a593Smuzhiyun * Return Value: 0 if success, otherwise error code
2948*4882a593Smuzhiyun */
mgsl_ioctl(struct tty_struct * tty,unsigned int cmd,unsigned long arg)2949*4882a593Smuzhiyun static int mgsl_ioctl(struct tty_struct *tty,
2950*4882a593Smuzhiyun unsigned int cmd, unsigned long arg)
2951*4882a593Smuzhiyun {
2952*4882a593Smuzhiyun struct mgsl_struct * info = tty->driver_data;
2953*4882a593Smuzhiyun
2954*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
2955*4882a593Smuzhiyun printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2956*4882a593Smuzhiyun info->device_name, cmd );
2957*4882a593Smuzhiyun
2958*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2959*4882a593Smuzhiyun return -ENODEV;
2960*4882a593Smuzhiyun
2961*4882a593Smuzhiyun if (cmd != TIOCMIWAIT) {
2962*4882a593Smuzhiyun if (tty_io_error(tty))
2963*4882a593Smuzhiyun return -EIO;
2964*4882a593Smuzhiyun }
2965*4882a593Smuzhiyun
2966*4882a593Smuzhiyun return mgsl_ioctl_common(info, cmd, arg);
2967*4882a593Smuzhiyun }
2968*4882a593Smuzhiyun
mgsl_ioctl_common(struct mgsl_struct * info,unsigned int cmd,unsigned long arg)2969*4882a593Smuzhiyun static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2970*4882a593Smuzhiyun {
2971*4882a593Smuzhiyun void __user *argp = (void __user *)arg;
2972*4882a593Smuzhiyun
2973*4882a593Smuzhiyun switch (cmd) {
2974*4882a593Smuzhiyun case MGSL_IOCGPARAMS:
2975*4882a593Smuzhiyun return mgsl_get_params(info, argp);
2976*4882a593Smuzhiyun case MGSL_IOCSPARAMS:
2977*4882a593Smuzhiyun return mgsl_set_params(info, argp);
2978*4882a593Smuzhiyun case MGSL_IOCGTXIDLE:
2979*4882a593Smuzhiyun return mgsl_get_txidle(info, argp);
2980*4882a593Smuzhiyun case MGSL_IOCSTXIDLE:
2981*4882a593Smuzhiyun return mgsl_set_txidle(info,(int)arg);
2982*4882a593Smuzhiyun case MGSL_IOCTXENABLE:
2983*4882a593Smuzhiyun return mgsl_txenable(info,(int)arg);
2984*4882a593Smuzhiyun case MGSL_IOCRXENABLE:
2985*4882a593Smuzhiyun return mgsl_rxenable(info,(int)arg);
2986*4882a593Smuzhiyun case MGSL_IOCTXABORT:
2987*4882a593Smuzhiyun return mgsl_txabort(info);
2988*4882a593Smuzhiyun case MGSL_IOCGSTATS:
2989*4882a593Smuzhiyun return mgsl_get_stats(info, argp);
2990*4882a593Smuzhiyun case MGSL_IOCWAITEVENT:
2991*4882a593Smuzhiyun return mgsl_wait_event(info, argp);
2992*4882a593Smuzhiyun case MGSL_IOCLOOPTXDONE:
2993*4882a593Smuzhiyun return mgsl_loopmode_send_done(info);
2994*4882a593Smuzhiyun /* Wait for modem input (DCD,RI,DSR,CTS) change
2995*4882a593Smuzhiyun * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
2996*4882a593Smuzhiyun */
2997*4882a593Smuzhiyun case TIOCMIWAIT:
2998*4882a593Smuzhiyun return modem_input_wait(info,(int)arg);
2999*4882a593Smuzhiyun
3000*4882a593Smuzhiyun default:
3001*4882a593Smuzhiyun return -ENOIOCTLCMD;
3002*4882a593Smuzhiyun }
3003*4882a593Smuzhiyun return 0;
3004*4882a593Smuzhiyun }
3005*4882a593Smuzhiyun
3006*4882a593Smuzhiyun /* mgsl_set_termios()
3007*4882a593Smuzhiyun *
3008*4882a593Smuzhiyun * Set new termios settings
3009*4882a593Smuzhiyun *
3010*4882a593Smuzhiyun * Arguments:
3011*4882a593Smuzhiyun *
3012*4882a593Smuzhiyun * tty pointer to tty structure
3013*4882a593Smuzhiyun * termios pointer to buffer to hold returned old termios
3014*4882a593Smuzhiyun *
3015*4882a593Smuzhiyun * Return Value: None
3016*4882a593Smuzhiyun */
mgsl_set_termios(struct tty_struct * tty,struct ktermios * old_termios)3017*4882a593Smuzhiyun static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3018*4882a593Smuzhiyun {
3019*4882a593Smuzhiyun struct mgsl_struct *info = tty->driver_data;
3020*4882a593Smuzhiyun unsigned long flags;
3021*4882a593Smuzhiyun
3022*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
3023*4882a593Smuzhiyun printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3024*4882a593Smuzhiyun tty->driver->name );
3025*4882a593Smuzhiyun
3026*4882a593Smuzhiyun mgsl_change_params(info);
3027*4882a593Smuzhiyun
3028*4882a593Smuzhiyun /* Handle transition to B0 status */
3029*4882a593Smuzhiyun if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
3030*4882a593Smuzhiyun info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3031*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
3032*4882a593Smuzhiyun usc_set_serial_signals(info);
3033*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
3034*4882a593Smuzhiyun }
3035*4882a593Smuzhiyun
3036*4882a593Smuzhiyun /* Handle transition away from B0 status */
3037*4882a593Smuzhiyun if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
3038*4882a593Smuzhiyun info->serial_signals |= SerialSignal_DTR;
3039*4882a593Smuzhiyun if (!C_CRTSCTS(tty) || !tty_throttled(tty))
3040*4882a593Smuzhiyun info->serial_signals |= SerialSignal_RTS;
3041*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
3042*4882a593Smuzhiyun usc_set_serial_signals(info);
3043*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
3044*4882a593Smuzhiyun }
3045*4882a593Smuzhiyun
3046*4882a593Smuzhiyun /* Handle turning off CRTSCTS */
3047*4882a593Smuzhiyun if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
3048*4882a593Smuzhiyun tty->hw_stopped = 0;
3049*4882a593Smuzhiyun mgsl_start(tty);
3050*4882a593Smuzhiyun }
3051*4882a593Smuzhiyun
3052*4882a593Smuzhiyun } /* end of mgsl_set_termios() */
3053*4882a593Smuzhiyun
/* mgsl_close()
 *
 * Called when port is closed. Wait for remaining data to be
 * sent. Disable port and free resources.
 *
 * Arguments:
 *
 * 	tty	pointer to open tty structure
 * 	filp	pointer to open file object
 *
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->port.count);

	/* tty_port_close_start() returns 0 when this is not the final
	 * close of the port (or the close is aborted) - nothing to tear
	 * down in that case */
	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	/* final close: drain pending transmit data (only if the port was
	 * actually brought up), discard buffers, then shut the port down -
	 * all serialized against concurrent opens by the port mutex */
	mutex_lock(&info->port.mutex);
	if (tty_port_initialized(&info->port))
		mgsl_wait_until_sent(tty, info->timeout);
	mgsl_flush_buffer(tty);
	tty_ldisc_flush(tty);
	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->port.count);

} /* end of mgsl_close() */
3096*4882a593Smuzhiyun
/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty (all queued data sent).
 *
 * Arguments:
 *
 * 	tty		pointer to tty info structure
 * 	timeout		time to wait for send completion, in jiffies
 *			(0 = wait indefinitely)
 *
 * Return Value:	None
 */
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct mgsl_struct * info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
		return;

	/* Port never brought up: nothing can be in flight. */
	if (!tty_port_initialized(&info->port))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	/* Never poll more slowly than the caller's total timeout. */
	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* Synchronous modes: poll the driver's tx_active flag.
		 * Stop early on a pending signal or expired timeout. */
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/* Async mode: poll the TCSR "all sent" hardware status bit
		 * for as long as the transmitter is still enabled. */
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

} /* end of mgsl_wait_until_sent() */
3170*4882a593Smuzhiyun
/* mgsl_hangup()
 *
 *	Called by tty_hangup() when a hangup is signaled.
 *	This is the same as closing all open files for the port.
 *
 * Arguments:		tty	pointer to associated tty object
 * Return Value:	None
 */
static void mgsl_hangup(struct tty_struct *tty)
{
	struct mgsl_struct * info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_hangup(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
		return;

	/* Discard pending data and shut the hardware down. */
	mgsl_flush_buffer(tty);
	shutdown(info);

	/* Force the port back to the fully-closed state. */
	info->port.count = 0;
	tty_port_set_active(&info->port, 0);
	info->port.tty = NULL;

	/* Release any opens blocked in block_til_ready(). */
	wake_up_interruptible(&info->port.open_wait);

} /* end of mgsl_hangup() */
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun /*
3202*4882a593Smuzhiyun * carrier_raised()
3203*4882a593Smuzhiyun *
3204*4882a593Smuzhiyun * Return true if carrier is raised
3205*4882a593Smuzhiyun */
3206*4882a593Smuzhiyun
carrier_raised(struct tty_port * port)3207*4882a593Smuzhiyun static int carrier_raised(struct tty_port *port)
3208*4882a593Smuzhiyun {
3209*4882a593Smuzhiyun unsigned long flags;
3210*4882a593Smuzhiyun struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3211*4882a593Smuzhiyun
3212*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock, flags);
3213*4882a593Smuzhiyun usc_get_serial_signals(info);
3214*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock, flags);
3215*4882a593Smuzhiyun return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3216*4882a593Smuzhiyun }
3217*4882a593Smuzhiyun
dtr_rts(struct tty_port * port,int on)3218*4882a593Smuzhiyun static void dtr_rts(struct tty_port *port, int on)
3219*4882a593Smuzhiyun {
3220*4882a593Smuzhiyun struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3221*4882a593Smuzhiyun unsigned long flags;
3222*4882a593Smuzhiyun
3223*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
3224*4882a593Smuzhiyun if (on)
3225*4882a593Smuzhiyun info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3226*4882a593Smuzhiyun else
3227*4882a593Smuzhiyun info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3228*4882a593Smuzhiyun usc_set_serial_signals(info);
3229*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
3230*4882a593Smuzhiyun }
3231*4882a593Smuzhiyun
3232*4882a593Smuzhiyun
/* block_til_ready()
 *
 *	Block the current process until the specified port
 *	is ready to be opened.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	filp		pointer to open file object
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise error code
 *			(-EAGAIN / -ERESTARTSYS if hung up or signaled)
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mgsl_struct *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int retval;
	bool do_clocal = false;
	unsigned long flags;
	int dcd;
	struct tty_port *port = &info->port;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready on %s\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
		/* nonblock mode is set or port is not enabled */
		tty_port_set_active(port, 1);
		return 0;
	}

	/* CLOCAL means ignore carrier: open succeeds without DCD. */
	if (C_CLOCAL(tty))
		do_clocal = true;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout). While we are in
	 * this loop, port->count is dropped by one, so that
	 * mgsl_close() knows when to free things. We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&port->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready before block on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	spin_lock_irqsave(&info->irq_spinlock, flags);
	port->count--;
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	port->blocked_open++;

	while (1) {
		/* Raise DTR/RTS while waiting so the remote can answer. */
		if (C_BAUD(tty) && tty_port_initialized(port))
			tty_port_raise_dtr_rts(port);

		set_current_state(TASK_INTERRUPTIBLE);

		/* Bail out if the port was hung up or shut down under us. */
		if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		dcd = tty_port_carrier_raised(&info->port);
		if (do_clocal || dcd)
			break;

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, port->count );

		/* Sleep without the tty lock so close()/hangup() can run. */
		tty_unlock(tty);
		schedule();
		tty_lock(tty);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	/* FIXME: Racy on hangup during close wait */
	if (!tty_hung_up_p(filp))
		port->count++;
	port->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	if (!retval)
		tty_port_set_active(port, 1);

	return retval;

} /* end of block_til_ready() */
3336*4882a593Smuzhiyun
mgsl_install(struct tty_driver * driver,struct tty_struct * tty)3337*4882a593Smuzhiyun static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3338*4882a593Smuzhiyun {
3339*4882a593Smuzhiyun struct mgsl_struct *info;
3340*4882a593Smuzhiyun int line = tty->index;
3341*4882a593Smuzhiyun
3342*4882a593Smuzhiyun /* verify range of specified line number */
3343*4882a593Smuzhiyun if (line >= mgsl_device_count) {
3344*4882a593Smuzhiyun printk("%s(%d):mgsl_open with invalid line #%d.\n",
3345*4882a593Smuzhiyun __FILE__, __LINE__, line);
3346*4882a593Smuzhiyun return -ENODEV;
3347*4882a593Smuzhiyun }
3348*4882a593Smuzhiyun
3349*4882a593Smuzhiyun /* find the info structure for the specified line */
3350*4882a593Smuzhiyun info = mgsl_device_list;
3351*4882a593Smuzhiyun while (info && info->line != line)
3352*4882a593Smuzhiyun info = info->next_device;
3353*4882a593Smuzhiyun if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3354*4882a593Smuzhiyun return -ENODEV;
3355*4882a593Smuzhiyun tty->driver_data = info;
3356*4882a593Smuzhiyun
3357*4882a593Smuzhiyun return tty_port_install(&info->port, driver, tty);
3358*4882a593Smuzhiyun }
3359*4882a593Smuzhiyun
/* mgsl_open()
 *
 *	Called when a port is opened.  Init and enable port.
 *	Perform serial-specific initialization for the tty structure.
 *
 * Arguments:		tty	pointer to tty info structure
 *			filp	associated file pointer
 *
 * Return Value:	0 if success, otherwise error code
 *			(-EBUSY if the port is claimed by the network layer)
 */
static int mgsl_open(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;
	int retval;

	info->port.tty = tty;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
			 __FILE__,__LINE__,tty->driver->name, info->port.count);

	info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	/* netlock serializes tty opens against netdev (netcount) use:
	 * the port cannot be opened as a tty while in network mode. */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		goto cleanup;
	}
	info->port.count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->port.count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0)
			goto cleanup;
	}

	/* Block until carrier (unless CLOCAL/non-blocking open). */
	retval = block_til_ready(tty, filp, info);
	if (retval) {
		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready(%s) returned %d\n",
				 __FILE__,__LINE__, info->device_name, retval);
		goto cleanup;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_open(%s) success\n",
			 __FILE__,__LINE__, info->device_name);
	retval = 0;

cleanup:
	/* On failure, undo the reference taken above. */
	if (retval) {
		if (tty->count == 1)
			info->port.tty = NULL; /* tty layer will release tty struct */
		if(info->port.count)
			info->port.count--;
	}

	return retval;

} /* end of mgsl_open() */
3424*4882a593Smuzhiyun
3425*4882a593Smuzhiyun /*
3426*4882a593Smuzhiyun * /proc fs routines....
3427*4882a593Smuzhiyun */
3428*4882a593Smuzhiyun
/* Emit one device's status line(s) into the /proc seq_file.
 *
 * Shows I/O resources, per-mode statistics counters, the current modem
 * signal states, and a snapshot of the USC hardware registers.
 */
static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
{
	char stat_buf[30];	/* holds "|SIG" tokens, e.g. "|RTS|CTS|DCD" */
	unsigned long flags;

	seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
		info->device_name, info->io_base, info->irq_level,
		info->phys_memory_base, info->phys_lcr_base);

	/* output current serial signal states */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->serial_signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->serial_signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->serial_signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->serial_signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->serial_signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->serial_signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	/* Synchronous vs async modes keep different statistics. */
	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ) {
		seq_printf(m, " HDLC txok:%d rxok:%d",
			      info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			seq_printf(m, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			seq_printf(m, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			seq_printf(m, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			seq_printf(m, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			seq_printf(m, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
	} else {
		seq_printf(m, " ASYNC tx:%d rx:%d",
			      info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			seq_printf(m, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			seq_printf(m, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			seq_printf(m, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			seq_printf(m, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end (+1 skips the leading '|') */
	seq_printf(m, " %s\n", stat_buf+1);

	seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
	 info->tx_active,info->bh_requested,info->bh_running,
	 info->pending_bh);

	/* Dump the USC register set atomically w.r.t. the ISR. */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	{
	u16 Tcsr = usc_InReg( info, TCSR );
	u16 Tdmr = usc_InDmaReg( info, TDMR );
	u16 Ticr = usc_InReg( info, TICR );
	u16 Rscr = usc_InReg( info, RCSR );
	u16 Rdmr = usc_InDmaReg( info, RDMR );
	u16 Ricr = usc_InReg( info, RICR );
	u16 Icr = usc_InReg( info, ICR );
	u16 Dccr = usc_InReg( info, DCCR );
	u16 Tmr = usc_InReg( info, TMR );
	u16 Tccr = usc_InReg( info, TCCR );
	u16 Ccar = inw( info->io_base + CCAR );
	seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
                      "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
	 		Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
3513*4882a593Smuzhiyun
3514*4882a593Smuzhiyun /* Called to print information about devices */
mgsl_proc_show(struct seq_file * m,void * v)3515*4882a593Smuzhiyun static int mgsl_proc_show(struct seq_file *m, void *v)
3516*4882a593Smuzhiyun {
3517*4882a593Smuzhiyun struct mgsl_struct *info;
3518*4882a593Smuzhiyun
3519*4882a593Smuzhiyun seq_printf(m, "synclink driver:%s\n", driver_version);
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun info = mgsl_device_list;
3522*4882a593Smuzhiyun while( info ) {
3523*4882a593Smuzhiyun line_info(m, info);
3524*4882a593Smuzhiyun info = info->next_device;
3525*4882a593Smuzhiyun }
3526*4882a593Smuzhiyun return 0;
3527*4882a593Smuzhiyun }
3528*4882a593Smuzhiyun
/* mgsl_allocate_dma_buffers()
 *
 *	Allocate and format DMA buffers (ISA adapter)
 *	or format shared memory buffers (PCI adapter).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error (-ENOMEM)
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
	unsigned short BuffersPerFrame;

	/* start carving the shared-memory window from offset 0 */
	info->last_mem_alloc = 0;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
	if ( info->max_frame_size % DMABUFFERSIZE )
		BuffersPerFrame++;

	/*
	 * The PCI adapter has 256KBytes of shared memory to use. This is 64
	 * PAGE_SIZE buffers.
	 *
	 * The first page is used for padding at this time so the buffer list
	 * does not begin at offset 0 of the PCI adapter's shared memory.
	 *
	 * The 2nd page is used for the buffer list. A 4K buffer list can hold
	 * 128 DMA_BUFFER structures at 32 bytes each.
	 *
	 * This leaves 62 4K pages.
	 *
	 * The next N pages are used for transmit frame(s). We reserve enough
	 * 4K page blocks to hold the required number of transmit dma buffers
	 * (num_tx_dma_buffers), each of MaxFrameSize size.
	 *
	 * Of the remaining pages (62-N), determine how many can be used to
	 * receive full MaxFrameSize inbound frames
	 */
	info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
	info->rx_buffer_count = 62 - info->tx_buffer_count;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);

	/* allocate the buffer lists, frame buffers, and the intermediate
	 * (linear) rx/tx buffers; fail as a unit if any step fails */
	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
		return -ENOMEM;
	}

	mgsl_reset_rx_dma_buffers( info );
	mgsl_reset_tx_dma_buffers( info );

	return 0;

} /* end of mgsl_allocate_dma_buffers() */
3593*4882a593Smuzhiyun
/*
 * mgsl_alloc_buffer_list_memory()
 *
 * Allocate a common DMA buffer for use as the
 * receive and transmit buffer lists.
 *
 * A buffer list is a set of buffer entries where each entry contains
 * a pointer to an actual buffer and a pointer to the next buffer entry
 * (plus some other info about the buffer).
 *
 * The buffer entries for a list are built to form a circular list so
 * that when the entire list has been traversed you start back at the
 * beginning.
 *
 * This function allocates memory for just the buffer entries.
 * The links (pointer to next entry) are filled in with the physical
 * address of the next entry so the adapter can navigate the list
 * using bus master DMA. The pointers to the actual buffers are filled
 * out later when the actual buffers are allocated.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
	unsigned int i;

	/* PCI adapter uses shared memory; physical addresses here are
	 * offsets into the adapter's shared-memory window. */
	info->buffer_list = info->memory_base + info->last_mem_alloc;
	info->buffer_list_phys = info->last_mem_alloc;
	info->last_mem_alloc += BUFFERLISTSIZE;

	/* We got the memory for the buffer entry lists. */
	/* Initialize the memory block to all zeros. */
	memset( info->buffer_list, 0, BUFFERLISTSIZE );

	/* Save virtual address pointers to the receive and */
	/* transmit buffer lists. (Receive 1st). These pointers will */
	/* be used by the processor to access the lists. */
	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list += info->rx_buffer_count;

	/*
	 * Build the links for the buffer entry lists such that
	 * two circular lists are built. (Transmit and Receive).
	 *
	 * Note: the links are physical addresses
	 * which are read by the adapter to determine the next
	 * buffer entry to use.
	 */

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->rx_buffer_list[i].phys_entry =
			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in circular list of entries; the last */
		/* entry keeps the base address, linking back to entry 0 */

		info->rx_buffer_list[i].link = info->buffer_list_phys;

		if ( i < info->rx_buffer_count - 1 )
			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in circular list of entries; the last */
		/* entry links back to the first tx entry */

		info->tx_buffer_list[i].link = info->buffer_list_phys +
			info->rx_buffer_count * sizeof(DMABUFFERENTRY);

		if ( i < info->tx_buffer_count - 1 )
			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	return 0;

} /* end of mgsl_alloc_buffer_list_memory() */
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun /* Free DMA buffers allocated for use as the
3680*4882a593Smuzhiyun * receive and transmit buffer lists.
3681*4882a593Smuzhiyun * Warning:
3682*4882a593Smuzhiyun *
3683*4882a593Smuzhiyun * The data transfer buffers associated with the buffer list
3684*4882a593Smuzhiyun * MUST be freed before freeing the buffer list itself because
3685*4882a593Smuzhiyun * the buffer list contains the information necessary to free
3686*4882a593Smuzhiyun * the individual buffers!
3687*4882a593Smuzhiyun */
mgsl_free_buffer_list_memory(struct mgsl_struct * info)3688*4882a593Smuzhiyun static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3689*4882a593Smuzhiyun {
3690*4882a593Smuzhiyun info->buffer_list = NULL;
3691*4882a593Smuzhiyun info->rx_buffer_list = NULL;
3692*4882a593Smuzhiyun info->tx_buffer_list = NULL;
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun } /* end of mgsl_free_buffer_list_memory() */
3695*4882a593Smuzhiyun
/*
 * mgsl_alloc_frame_memory()
 *
 * Allocate the frame DMA buffers used by the specified buffer list.
 * Each DMA buffer will be one memory page in size. This is necessary
 * because memory can fragment enough that it may be impossible to
 * allocate contiguous pages.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	BufferList	pointer to list of buffer entries
 *	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 *			(this PCI shared-memory path cannot fail,
 *			so 0 is always returned here)
 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
	int i;

	/* Carve fixed-size buffers out of the adapter's shared memory;
	 * phys_addr is the offset within that window, virt_addr the
	 * CPU-visible mapping of the same region. */

	for ( i = 0; i < Buffercount; i++ ) {
		BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
		BufferList[i].phys_addr = info->last_mem_alloc;
		info->last_mem_alloc += DMABUFFERSIZE;
	}

	return 0;

} /* end of mgsl_alloc_frame_memory() */
3727*4882a593Smuzhiyun
3728*4882a593Smuzhiyun /*
3729*4882a593Smuzhiyun * mgsl_free_frame_memory()
3730*4882a593Smuzhiyun *
3731*4882a593Smuzhiyun * Free the buffers associated with
3732*4882a593Smuzhiyun * each buffer entry of a buffer list.
3733*4882a593Smuzhiyun *
3734*4882a593Smuzhiyun * Arguments:
3735*4882a593Smuzhiyun *
3736*4882a593Smuzhiyun * info pointer to device instance data
3737*4882a593Smuzhiyun * BufferList pointer to list of buffer entries
3738*4882a593Smuzhiyun * Buffercount count of buffer entries in buffer list
3739*4882a593Smuzhiyun *
3740*4882a593Smuzhiyun * Return Value: None
3741*4882a593Smuzhiyun */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
	int i;

	/*
	 * Frame buffers are carved out of the adapter's shared memory
	 * window (see mgsl_alloc_frame_memory()), not allocated from the
	 * kernel heap, so there is nothing to free here. Just clear the
	 * stale pointers so they cannot be dereferenced later.
	 *
	 * Fix: the original guarded each assignment with
	 * `if (virt_addr) virt_addr = NULL;` — a dead conditional, since
	 * unconditionally writing NULL is equivalent.
	 */
	if (!BufferList)
		return;

	for (i = 0; i < Buffercount; i++)
		BufferList[i].virt_addr = NULL;

} /* end of mgsl_free_frame_memory() */
3755*4882a593Smuzhiyun
3756*4882a593Smuzhiyun /* mgsl_free_dma_buffers()
3757*4882a593Smuzhiyun *
3758*4882a593Smuzhiyun * Free DMA buffers
3759*4882a593Smuzhiyun *
3760*4882a593Smuzhiyun * Arguments: info pointer to device instance data
3761*4882a593Smuzhiyun * Return Value: None
3762*4882a593Smuzhiyun */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	/* Release the rx/tx frame buffers first, then the memory holding
	 * the DMA buffer-list entries themselves. */
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

} /* end of mgsl_free_dma_buffers() */
3770*4882a593Smuzhiyun
3771*4882a593Smuzhiyun
3772*4882a593Smuzhiyun /*
3773*4882a593Smuzhiyun * mgsl_alloc_intermediate_rxbuffer_memory()
3774*4882a593Smuzhiyun *
3775*4882a593Smuzhiyun * Allocate a buffer large enough to hold max_frame_size. This buffer
3776*4882a593Smuzhiyun * is used to pass an assembled frame to the line discipline.
3777*4882a593Smuzhiyun *
3778*4882a593Smuzhiyun * Arguments:
3779*4882a593Smuzhiyun *
3780*4882a593Smuzhiyun * info pointer to device instance data
3781*4882a593Smuzhiyun *
3782*4882a593Smuzhiyun * Return Value: 0 if success, otherwise -ENOMEM
3783*4882a593Smuzhiyun */
mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct * info)3784*4882a593Smuzhiyun static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3785*4882a593Smuzhiyun {
3786*4882a593Smuzhiyun info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3787*4882a593Smuzhiyun if ( info->intermediate_rxbuffer == NULL )
3788*4882a593Smuzhiyun return -ENOMEM;
3789*4882a593Smuzhiyun /* unused flag buffer to satisfy receive_buf calling interface */
3790*4882a593Smuzhiyun info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3791*4882a593Smuzhiyun if (!info->flag_buf) {
3792*4882a593Smuzhiyun kfree(info->intermediate_rxbuffer);
3793*4882a593Smuzhiyun info->intermediate_rxbuffer = NULL;
3794*4882a593Smuzhiyun return -ENOMEM;
3795*4882a593Smuzhiyun }
3796*4882a593Smuzhiyun return 0;
3797*4882a593Smuzhiyun
3798*4882a593Smuzhiyun } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3799*4882a593Smuzhiyun
3800*4882a593Smuzhiyun /*
3801*4882a593Smuzhiyun * mgsl_free_intermediate_rxbuffer_memory()
3802*4882a593Smuzhiyun *
3803*4882a593Smuzhiyun *
3804*4882a593Smuzhiyun * Arguments:
3805*4882a593Smuzhiyun *
3806*4882a593Smuzhiyun * info pointer to device instance data
3807*4882a593Smuzhiyun *
3808*4882a593Smuzhiyun * Return Value: None
3809*4882a593Smuzhiyun */
mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct * info)3810*4882a593Smuzhiyun static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3811*4882a593Smuzhiyun {
3812*4882a593Smuzhiyun kfree(info->intermediate_rxbuffer);
3813*4882a593Smuzhiyun info->intermediate_rxbuffer = NULL;
3814*4882a593Smuzhiyun kfree(info->flag_buf);
3815*4882a593Smuzhiyun info->flag_buf = NULL;
3816*4882a593Smuzhiyun
3817*4882a593Smuzhiyun } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3818*4882a593Smuzhiyun
3819*4882a593Smuzhiyun /*
3820*4882a593Smuzhiyun * mgsl_alloc_intermediate_txbuffer_memory()
3821*4882a593Smuzhiyun *
3822*4882a593Smuzhiyun * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size.
3823*4882a593Smuzhiyun * This buffer is used to load transmit frames into the adapter's dma transfer
3824*4882a593Smuzhiyun * buffers when there is sufficient space.
3825*4882a593Smuzhiyun *
3826*4882a593Smuzhiyun * Arguments:
3827*4882a593Smuzhiyun *
3828*4882a593Smuzhiyun * info pointer to device instance data
3829*4882a593Smuzhiyun *
3830*4882a593Smuzhiyun * Return Value: 0 if success, otherwise -ENOMEM
3831*4882a593Smuzhiyun */
mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct * info)3832*4882a593Smuzhiyun static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3833*4882a593Smuzhiyun {
3834*4882a593Smuzhiyun int i;
3835*4882a593Smuzhiyun
3836*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
3837*4882a593Smuzhiyun printk("%s %s(%d) allocating %d tx holding buffers\n",
3838*4882a593Smuzhiyun info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3839*4882a593Smuzhiyun
3840*4882a593Smuzhiyun memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3841*4882a593Smuzhiyun
3842*4882a593Smuzhiyun for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3843*4882a593Smuzhiyun info->tx_holding_buffers[i].buffer =
3844*4882a593Smuzhiyun kmalloc(info->max_frame_size, GFP_KERNEL);
3845*4882a593Smuzhiyun if (info->tx_holding_buffers[i].buffer == NULL) {
3846*4882a593Smuzhiyun for (--i; i >= 0; i--) {
3847*4882a593Smuzhiyun kfree(info->tx_holding_buffers[i].buffer);
3848*4882a593Smuzhiyun info->tx_holding_buffers[i].buffer = NULL;
3849*4882a593Smuzhiyun }
3850*4882a593Smuzhiyun return -ENOMEM;
3851*4882a593Smuzhiyun }
3852*4882a593Smuzhiyun }
3853*4882a593Smuzhiyun
3854*4882a593Smuzhiyun return 0;
3855*4882a593Smuzhiyun
3856*4882a593Smuzhiyun } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3857*4882a593Smuzhiyun
3858*4882a593Smuzhiyun /*
3859*4882a593Smuzhiyun * mgsl_free_intermediate_txbuffer_memory()
3860*4882a593Smuzhiyun *
3861*4882a593Smuzhiyun *
3862*4882a593Smuzhiyun * Arguments:
3863*4882a593Smuzhiyun *
3864*4882a593Smuzhiyun * info pointer to device instance data
3865*4882a593Smuzhiyun *
3866*4882a593Smuzhiyun * Return Value: None
3867*4882a593Smuzhiyun */
mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct * info)3868*4882a593Smuzhiyun static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3869*4882a593Smuzhiyun {
3870*4882a593Smuzhiyun int i;
3871*4882a593Smuzhiyun
3872*4882a593Smuzhiyun for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3873*4882a593Smuzhiyun kfree(info->tx_holding_buffers[i].buffer);
3874*4882a593Smuzhiyun info->tx_holding_buffers[i].buffer = NULL;
3875*4882a593Smuzhiyun }
3876*4882a593Smuzhiyun
3877*4882a593Smuzhiyun info->get_tx_holding_index = 0;
3878*4882a593Smuzhiyun info->put_tx_holding_index = 0;
3879*4882a593Smuzhiyun info->tx_holding_count = 0;
3880*4882a593Smuzhiyun
3881*4882a593Smuzhiyun } /* end of mgsl_free_intermediate_txbuffer_memory() */
3882*4882a593Smuzhiyun
3883*4882a593Smuzhiyun
3884*4882a593Smuzhiyun /*
3885*4882a593Smuzhiyun * load_next_tx_holding_buffer()
3886*4882a593Smuzhiyun *
3887*4882a593Smuzhiyun * attempts to load the next buffered tx request into the
3888*4882a593Smuzhiyun * tx dma buffers
3889*4882a593Smuzhiyun *
3890*4882a593Smuzhiyun * Arguments:
3891*4882a593Smuzhiyun *
3892*4882a593Smuzhiyun * info pointer to device instance data
3893*4882a593Smuzhiyun *
3894*4882a593Smuzhiyun * Return Value: true if next buffered tx request loaded
3895*4882a593Smuzhiyun * into adapter's tx dma buffer,
3896*4882a593Smuzhiyun * false otherwise
3897*4882a593Smuzhiyun */
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	bool ret = false;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		/* number of DMABUFFERSIZE chunks needed, rounded up */
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* consume this holding buffer: advance the circular
			 * get index and drop the pending count */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = true;
		}
	}

	return ret;
}
3930*4882a593Smuzhiyun
3931*4882a593Smuzhiyun /*
3932*4882a593Smuzhiyun * save_tx_buffer_request()
3933*4882a593Smuzhiyun *
3934*4882a593Smuzhiyun * attempt to store transmit frame request for later transmission
3935*4882a593Smuzhiyun *
3936*4882a593Smuzhiyun * Arguments:
3937*4882a593Smuzhiyun *
3938*4882a593Smuzhiyun * info pointer to device instance data
3939*4882a593Smuzhiyun * Buffer pointer to buffer containing frame to load
3940*4882a593Smuzhiyun * BufferSize size in bytes of frame in Buffer
3941*4882a593Smuzhiyun *
3942*4882a593Smuzhiyun * Return Value: 1 if able to store, 0 otherwise
3943*4882a593Smuzhiyun */
save_tx_buffer_request(struct mgsl_struct * info,const char * Buffer,unsigned int BufferSize)3944*4882a593Smuzhiyun static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
3945*4882a593Smuzhiyun {
3946*4882a593Smuzhiyun struct tx_holding_buffer *ptx;
3947*4882a593Smuzhiyun
3948*4882a593Smuzhiyun if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
3949*4882a593Smuzhiyun return 0; /* all buffers in use */
3950*4882a593Smuzhiyun }
3951*4882a593Smuzhiyun
3952*4882a593Smuzhiyun ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
3953*4882a593Smuzhiyun ptx->buffer_size = BufferSize;
3954*4882a593Smuzhiyun memcpy( ptx->buffer, Buffer, BufferSize);
3955*4882a593Smuzhiyun
3956*4882a593Smuzhiyun ++info->tx_holding_count;
3957*4882a593Smuzhiyun if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
3958*4882a593Smuzhiyun info->put_tx_holding_index=0;
3959*4882a593Smuzhiyun
3960*4882a593Smuzhiyun return 1;
3961*4882a593Smuzhiyun }
3962*4882a593Smuzhiyun
/*
 * mgsl_claim_resources()
 *
 * Claim all hardware resources for the adapter: I/O port range, IRQ,
 * the two PCI memory windows (shared memory and LCR), map them, verify
 * shared memory, and allocate DMA buffers. On any failure all resources
 * acquired so far are released via mgsl_release_resources().
 *
 * Return Value: 0 on success, otherwise -ENODEV
 */
static int mgsl_claim_resources(struct mgsl_struct *info)
{
	/* Reserve the adapter's I/O port range. */
	if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->io_base);
		return -ENODEV;
	}
	info->io_addr_requested = true;

	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
		info->device_name, info ) < 0 ) {
		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, info->irq_level );
		goto errout;
	}
	info->irq_requested = true;

	/* Reserve the 256KB shared memory window. */
	if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
		printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base);
		goto errout;
	}
	info->shared_mem_requested = true;
	/* Reserve the 128-byte LCR register window at its offset. */
	if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
		printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
		goto errout;
	}
	info->lcr_mem_requested = true;

	info->memory_base = ioremap(info->phys_memory_base, 0x40000);
	if (!info->memory_base) {
		printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base );
		goto errout;
	}

	/* Sanity-check the shared memory before trusting it for DMA. */
	if ( !mgsl_memory_test(info) ) {
		printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_memory_base );
		goto errout;
	}

	info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE);
	if (!info->lcr_base) {
		printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
		goto errout;
	}
	/* lcr_base points at the registers; the mapping itself starts at
	 * phys_lcr_base, so mgsl_release_resources() must subtract
	 * lcr_offset again before iounmap(). */
	info->lcr_base += info->lcr_offset;

	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, info->dma_level );
		goto errout;
	}

	return 0;
errout:
	mgsl_release_resources(info);
	return -ENODEV;

} /* end of mgsl_claim_resources() */
4026*4882a593Smuzhiyun
/*
 * mgsl_release_resources()
 *
 * Release every resource claimed by mgsl_claim_resources(). Safe to
 * call on a partially initialized device: each release is gated by the
 * corresponding *_requested flag or pointer, which is cleared after use
 * so the function is idempotent.
 */
static void mgsl_release_resources(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}
	if ( info->dma_requested ) {
		disable_dma(info->dma_level);
		free_dma(info->dma_level);
		info->dma_requested = false;
	}
	mgsl_free_dma_buffers(info);
	mgsl_free_intermediate_rxbuffer_memory(info);
	mgsl_free_intermediate_txbuffer_memory(info);

	if ( info->io_addr_requested ) {
		release_region(info->io_base,info->io_addr_size);
		info->io_addr_requested = false;
	}
	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,0x40000);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base = NULL;
	}
	if (info->lcr_base){
		/* lcr_base was advanced by lcr_offset in mgsl_claim_resources();
		 * back it out to unmap the original mapping address. */
		iounmap(info->lcr_base - info->lcr_offset);
		info->lcr_base = NULL;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) exit\n",
			__FILE__,__LINE__,info->device_name );

} /* end of mgsl_release_resources() */
4072*4882a593Smuzhiyun
4073*4882a593Smuzhiyun /* mgsl_add_device()
4074*4882a593Smuzhiyun *
4075*4882a593Smuzhiyun * Add the specified device instance data structure to the
4076*4882a593Smuzhiyun * global linked list of devices and increment the device count.
4077*4882a593Smuzhiyun *
4078*4882a593Smuzhiyun * Arguments: info pointer to device instance data
4079*4882a593Smuzhiyun * Return Value: None
4080*4882a593Smuzhiyun */
static void mgsl_add_device( struct mgsl_struct *info )
{
	info->next_device = NULL;
	info->line = mgsl_device_count;
	sprintf(info->device_name,"ttySL%d",info->line);

	if (info->line < MAX_TOTAL_DEVICES) {
		/* Apply per-line module parameter overrides, if supplied. */
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];

		if (txdmabufs[info->line]) {
			info->num_tx_dma_buffers = txdmabufs[info->line];
			if (info->num_tx_dma_buffers < 1)
				info->num_tx_dma_buffers = 1;
		}

		if (txholdbufs[info->line]) {
			/* clamp holding buffer count to [1, MAX_TX_HOLDING_BUFFERS] */
			info->num_tx_holding_buffers = txholdbufs[info->line];
			if (info->num_tx_holding_buffers < 1)
				info->num_tx_holding_buffers = 1;
			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
		}
	}

	mgsl_device_count++;

	/* Append to the tail of the global singly-linked device list. */
	if ( !mgsl_device_list )
		mgsl_device_list = info;
	else {
		struct mgsl_struct *current_dev = mgsl_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* Clamp max_frame_size to the supported range [4096, 65535]. */
	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
		info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
		info->phys_memory_base, info->phys_lcr_base,
		info->max_frame_size );

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif

} /* end of mgsl_add_device() */
4132*4882a593Smuzhiyun
/* tty_port callbacks for modem-control handling (DCD and DTR/RTS). */
static const struct tty_port_operations mgsl_port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};
4137*4882a593Smuzhiyun
4138*4882a593Smuzhiyun
4139*4882a593Smuzhiyun /* mgsl_allocate_device()
4140*4882a593Smuzhiyun *
4141*4882a593Smuzhiyun * Allocate and initialize a device instance structure
4142*4882a593Smuzhiyun *
4143*4882a593Smuzhiyun * Arguments: none
4144*4882a593Smuzhiyun * Return Value: pointer to mgsl_struct if success, otherwise NULL
4145*4882a593Smuzhiyun */
mgsl_allocate_device(void)4146*4882a593Smuzhiyun static struct mgsl_struct* mgsl_allocate_device(void)
4147*4882a593Smuzhiyun {
4148*4882a593Smuzhiyun struct mgsl_struct *info;
4149*4882a593Smuzhiyun
4150*4882a593Smuzhiyun info = kzalloc(sizeof(struct mgsl_struct),
4151*4882a593Smuzhiyun GFP_KERNEL);
4152*4882a593Smuzhiyun
4153*4882a593Smuzhiyun if (!info) {
4154*4882a593Smuzhiyun printk("Error can't allocate device instance data\n");
4155*4882a593Smuzhiyun } else {
4156*4882a593Smuzhiyun tty_port_init(&info->port);
4157*4882a593Smuzhiyun info->port.ops = &mgsl_port_ops;
4158*4882a593Smuzhiyun info->magic = MGSL_MAGIC;
4159*4882a593Smuzhiyun INIT_WORK(&info->task, mgsl_bh_handler);
4160*4882a593Smuzhiyun info->max_frame_size = 4096;
4161*4882a593Smuzhiyun info->port.close_delay = 5*HZ/10;
4162*4882a593Smuzhiyun info->port.closing_wait = 30*HZ;
4163*4882a593Smuzhiyun init_waitqueue_head(&info->status_event_wait_q);
4164*4882a593Smuzhiyun init_waitqueue_head(&info->event_wait_q);
4165*4882a593Smuzhiyun spin_lock_init(&info->irq_spinlock);
4166*4882a593Smuzhiyun spin_lock_init(&info->netlock);
4167*4882a593Smuzhiyun memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4168*4882a593Smuzhiyun info->idle_mode = HDLC_TXIDLE_FLAGS;
4169*4882a593Smuzhiyun info->num_tx_dma_buffers = 1;
4170*4882a593Smuzhiyun info->num_tx_holding_buffers = 0;
4171*4882a593Smuzhiyun }
4172*4882a593Smuzhiyun
4173*4882a593Smuzhiyun return info;
4174*4882a593Smuzhiyun
4175*4882a593Smuzhiyun } /* end of mgsl_allocate_device()*/
4176*4882a593Smuzhiyun
/* tty driver entry points for the SyncLink serial devices. */
static const struct tty_operations mgsl_ops = {
	.install = mgsl_install,
	.open = mgsl_open,
	.close = mgsl_close,
	.write = mgsl_write,
	.put_char = mgsl_put_char,
	.flush_chars = mgsl_flush_chars,
	.write_room = mgsl_write_room,
	.chars_in_buffer = mgsl_chars_in_buffer,
	.flush_buffer = mgsl_flush_buffer,
	.ioctl = mgsl_ioctl,
	.throttle = mgsl_throttle,
	.unthrottle = mgsl_unthrottle,
	.send_xchar = mgsl_send_xchar,
	.break_ctl = mgsl_break,
	.wait_until_sent = mgsl_wait_until_sent,
	.set_termios = mgsl_set_termios,
	.stop = mgsl_stop,
	.start = mgsl_start,
	.hangup = mgsl_hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	/* "msgl" is a long-standing typo in the handler's name, kept as-is */
	.get_icount = msgl_get_icount,
	.proc_show = mgsl_proc_show,
};
4202*4882a593Smuzhiyun
4203*4882a593Smuzhiyun /*
4204*4882a593Smuzhiyun * perform tty device initialization
4205*4882a593Smuzhiyun */
static int mgsl_init_tty(void)
{
	int rc;

	/* Allocate a driver with room for up to 128 tty lines. */
	serial_driver = alloc_tty_driver(128);
	if (!serial_driver)
		return -ENOMEM;

	serial_driver->driver_name = "synclink";
	serial_driver->name = "ttySL";
	serial_driver->major = ttymajor;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	/* default line settings: 9600 8N1, modem-control enabled */
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->init_termios.c_ispeed = 9600;
	serial_driver->init_termios.c_ospeed = 9600;
	serial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(serial_driver, &mgsl_ops);
	if ((rc = tty_register_driver(serial_driver)) < 0) {
		printk("%s(%d):Couldn't register serial driver\n",
			__FILE__,__LINE__);
		/* drop the allocation and leave the global NULL so
		 * synclink_cleanup() skips the unregister path */
		put_tty_driver(serial_driver);
		serial_driver = NULL;
		return rc;
	}

	printk("%s %s, tty major#%d\n",
		driver_name, driver_version,
		serial_driver->major);
	return 0;
}
4240*4882a593Smuzhiyun
/*
 * synclink_cleanup()
 *
 * Tear down everything set up at module init: unregister the tty
 * driver, release each device's resources and free its instance data,
 * then unregister the PCI driver if it was registered.
 */
static void synclink_cleanup(void)
{
	int rc;
	struct mgsl_struct *info;
	struct mgsl_struct *tmp;

	printk("Unloading %s: %s\n", driver_name, driver_version);

	if (serial_driver) {
		rc = tty_unregister_driver(serial_driver);
		if (rc)
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	/* Walk the global device list, freeing each node after
	 * advancing past it. */
	info = mgsl_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		mgsl_release_resources(info);
		tmp = info;
		info = info->next_device;
		tty_port_destroy(&tmp->port);
		kfree(tmp);
	}

	if (pci_registered)
		pci_unregister_driver(&synclink_pci_driver);
}
4272*4882a593Smuzhiyun
/* Module entry point: register the PCI driver and tty interface. */
static int __init synclink_init(void)
{
	int rc;

	if (break_on_load) {
		/* give a debugger control at load time */
		mgsl_get_text_ptr();
		BREAKPOINT();
	}

	printk("%s %s\n", driver_name, driver_version);

	/* NOTE(review): a pci_register_driver() failure is only logged;
	 * init continues and can still return 0 if the tty setup
	 * succeeds — rc is overwritten by mgsl_init_tty() below. */
	if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
	else
		pci_registered = true;

	if ((rc = mgsl_init_tty()) < 0)
		goto error;

	return 0;

error:
	synclink_cleanup();
	return rc;
}
4298*4882a593Smuzhiyun
/* Module exit point: release all devices and driver registrations. */
static void __exit synclink_exit(void)
{
	synclink_cleanup();
}

module_init(synclink_init);
module_exit(synclink_exit);
4306*4882a593Smuzhiyun
4307*4882a593Smuzhiyun /*
4308*4882a593Smuzhiyun * usc_RTCmd()
4309*4882a593Smuzhiyun *
4310*4882a593Smuzhiyun * Issue a USC Receive/Transmit command to the
4311*4882a593Smuzhiyun * Channel Command/Address Register (CCAR).
4312*4882a593Smuzhiyun *
4313*4882a593Smuzhiyun * Notes:
4314*4882a593Smuzhiyun *
4315*4882a593Smuzhiyun * The command is encoded in the most significant 5 bits <15..11>
4316*4882a593Smuzhiyun * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4317*4882a593Smuzhiyun * and Bits <6..0> must be written as zeros.
4318*4882a593Smuzhiyun *
4319*4882a593Smuzhiyun * Arguments:
4320*4882a593Smuzhiyun *
4321*4882a593Smuzhiyun * info pointer to device information structure
4322*4882a593Smuzhiyun * Cmd command mask (use symbolic macros)
4323*4882a593Smuzhiyun *
4324*4882a593Smuzhiyun * Return Value:
4325*4882a593Smuzhiyun *
4326*4882a593Smuzhiyun * None
4327*4882a593Smuzhiyun */
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* output command to CCAR in bits <15..11> */
	/* preserve bits <10..7>, bits <6..0> must be zero */

	outw( Cmd + info->loopback_bits, info->io_base + CCAR );

	/* Read to flush write to CCAR */
	inw( info->io_base + CCAR );

} /* end of usc_RTCmd() */
4339*4882a593Smuzhiyun
4340*4882a593Smuzhiyun /*
4341*4882a593Smuzhiyun * usc_DmaCmd()
4342*4882a593Smuzhiyun *
4343*4882a593Smuzhiyun * Issue a DMA command to the DMA Command/Address Register (DCAR).
4344*4882a593Smuzhiyun *
4345*4882a593Smuzhiyun * Arguments:
4346*4882a593Smuzhiyun *
4347*4882a593Smuzhiyun * info pointer to device information structure
4348*4882a593Smuzhiyun * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4349*4882a593Smuzhiyun *
4350*4882a593Smuzhiyun * Return Value:
4351*4882a593Smuzhiyun *
4352*4882a593Smuzhiyun * None
4353*4882a593Smuzhiyun */
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* write command mask to DCAR (located at the adapter base
	 * address); mbre_bit preserves the state of BIT8 */
	outw( Cmd + info->mbre_bit, info->io_base );

	/* Read to flush write to DCAR */
	inw( info->io_base );

} /* end of usc_DmaCmd() */
4363*4882a593Smuzhiyun
4364*4882a593Smuzhiyun /*
4365*4882a593Smuzhiyun * usc_OutDmaReg()
4366*4882a593Smuzhiyun *
4367*4882a593Smuzhiyun * Write a 16-bit value to a USC DMA register
4368*4882a593Smuzhiyun *
4369*4882a593Smuzhiyun * Arguments:
4370*4882a593Smuzhiyun *
4371*4882a593Smuzhiyun * info pointer to device info structure
4372*4882a593Smuzhiyun * RegAddr register address (number) for write
4373*4882a593Smuzhiyun * RegValue 16-bit value to write to register
4374*4882a593Smuzhiyun *
4375*4882a593Smuzhiyun * Return Value:
4376*4882a593Smuzhiyun *
4377*4882a593Smuzhiyun * None
4378*4882a593Smuzhiyun *
4379*4882a593Smuzhiyun */
static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* two-step access: select the register, then write the value */
	outw( RegAddr + info->mbre_bit, info->io_base );
	outw( RegValue, info->io_base );

	/* Read to flush write to DCAR */
	inw( info->io_base );

} /* end of usc_OutDmaReg() */
4392*4882a593Smuzhiyun
/*
 * usc_InDmaReg()
 *
 * Read a 16-bit value from a DMA register
 *
 * Arguments:
 *
 *    info     pointer to device info structure
 *    RegAddr  register address (number) to read from
 *
 * Return Value:
 *
 *    The 16-bit value read from register
 *
 */
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* Select register via DCAR (preserving mbre_bit/BIT8), then the
	 * following word read returns the selected register's contents. */
	outw( RegAddr + info->mbre_bit, info->io_base );
	return inw( info->io_base );

} /* end of usc_InDmaReg() */
4417*4882a593Smuzhiyun
/*
 *
 * usc_OutReg()
 *
 * Write a 16-bit value to a USC serial channel register
 *
 * Arguments:
 *
 *    info      pointer to device info structure
 *    RegAddr   register address (number) to write to
 *    RegValue  16-bit value to write to register
 *
 * Return Value:
 *
 *    None
 *
 */
static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
	/* Select the target serial channel register through CCAR.
	 * Adding info->loopback_bits preserves the internal-loopback
	 * selection bits held in CCAR. */
	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
	outw( RegValue, info->io_base + CCAR );

	/* Read to flush write to CCAR */
	inw( info->io_base + CCAR );

} /* end of usc_OutReg() */
4444*4882a593Smuzhiyun
/*
 * usc_InReg()
 *
 * Reads a 16-bit value from a USC serial channel register
 *
 * Arguments:
 *
 *    info     pointer to device extension
 *    RegAddr  register address (number) to read from
 *
 * Return Value:
 *
 *    16-bit value read from register
 */
static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
{
	/* Select register via CCAR (preserving loopback selection bits),
	 * then read back the selected register's contents. */
	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
	return inw( info->io_base + CCAR );

} /* end of usc_InReg() */
4465*4882a593Smuzhiyun
/* usc_set_sdlc_mode()
 *
 *    Set up the adapter for SDLC DMA communications.
 *
 *    Programs, in order: channel mode (CMR), receiver (RMR/RCLR/RICR),
 *    transmitter (TMR/TICR/TCSR), clocking (CMCR/HCR, optional DPLL via
 *    BRG1), CCSR, DMA controller (DCR/RDMR/TDMR/DICR/CDIR/BDCR) and the
 *    interrupt enables. Register write order follows the hardware's
 *    select-then-write access model, so statements must not be reordered.
 *
 * Arguments:		info    pointer to device instance data
 * Return Value: 	NONE
 */
static void usc_set_sdlc_mode( struct mgsl_struct *info )
{
	u16 RegValue;
	bool PreSL1660;

	/*
	 * determine if the IUSC on the adapter is pre-SL1660. If
	 * not, take advantage of the UnderWait feature of more
	 * modern chips. If an underrun occurs and this bit is set,
	 * the transmitter will idle the programmed idle pattern
	 * until the driver has time to service the underrun. Otherwise,
	 * the dma controller may get the cycles previously requested
	 * and begin transmitting queued tx data.
	 */
	usc_OutReg(info,TMCR,0x1f);	/* select the chip revision readback */
	RegValue=usc_InReg(info,TMDR);
	PreSL1660 = (RegValue == IUSC_PRE_SL1660);

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
	{
	   /*
	   ** Channel Mode Register (CMR)
	   **
	   ** <15..14>	10	Tx Sub Modes, Send Flag on Underrun
	   ** <13>	0	0 = Transmit Disabled (initially)
	   ** <12>	0	1 = Consecutive Idles share common 0
	   ** <11..8>	1110	Transmitter Mode = HDLC/SDLC Loop
	   ** <7..4>	0000	Rx Sub Modes, addr/ctrl field handling
	   ** <3..0>	0110	Receiver Mode = HDLC/SDLC
	   **
	   ** 1000 1110 0000 0110 = 0x8e06
	   */
	   RegValue = 0x8e06;

	   /*--------------------------------------------------
	    * ignore user options for UnderRun Actions and
	    * preambles
	    *--------------------------------------------------*/
	}
	else
	{
	   /* Channel mode Register (CMR)
	    *
	    * <15..14>  00    Tx Sub modes, Underrun Action
	    * <13>      0     1 = Send Preamble before opening flag
	    * <12>      0     1 = Consecutive Idles share common 0
	    * <11..8>   0110  Transmitter mode = HDLC/SDLC
	    * <7..4>    0000  Rx Sub modes, addr/ctrl field handling
	    * <3..0>    0110  Receiver mode = HDLC/SDLC
	    *
	    * 0000 0110 0000 0110 = 0x0606
	    */
	   if (info->params.mode == MGSL_MODE_RAW) {
		RegValue = 0x0001;		/* Set Receive mode = external sync */

		usc_OutReg( info, IOCR,		/* Set IOCR DCD is RxSync Detect Input */
			(unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));

		/*
		 * TxSubMode:
		 * 	CMR <15>		0	Don't send CRC on Tx Underrun
		 * 	CMR <14>		x	undefined
		 * 	CMR <13>		0	Send preamble before openning sync
		 * 	CMR <12>		0	Send 8-bit syncs, 1=send Syncs per TxLength
		 *
		 * TxMode:
		 * 	CMR <11-8)	0100	MonoSync
		 *
		 * 	0x00 0100 xxxx xxxx  04xx
		 */
		RegValue |= 0x0400;
	   }
	   else {

	   RegValue = 0x0606;

	   if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
		RegValue |= BIT14;
	   else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
		RegValue |= BIT15;
	   else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
		RegValue |= BIT15 | BIT14;
	   }

	   if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
		RegValue |= BIT13;
	}

	if ( info->params.mode == MGSL_MODE_HDLC &&
		(info->params.flags & HDLC_FLAG_SHARE_ZERO) )
		RegValue |= BIT12;

	if ( info->params.addr_filter != 0xff )
	{
		/* set up receive address filtering */
		usc_OutReg( info, RSR, info->params.addr_filter );
		RegValue |= BIT4;
	}

	usc_OutReg( info, CMR, RegValue );
	info->cmr_value = RegValue;	/* cached so other paths can restore CMR */

	/* Receiver mode Register (RMR)
	 *
	 * <15..13>  000    encoding
	 * <12..11>  00     FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
	 * <10>      1      1 = Set CRC to all 1s (use for SDLC/HDLC)
	 * <9>       0      1 = Include Receive chars in CRC
	 * <8>       1      1 = Use Abort/PE bit as abort indicator
	 * <7..6>    00     Even parity
	 * <5>       0      parity disabled
	 * <4..2>    000    Receive Char Length = 8 bits
	 * <1..0>    00     Disable Receiver
	 *
	 * 0000 0101 0000 0000 = 0x0500
	 */

	RegValue = 0x0500;

	switch ( info->params.encoding ) {
	case HDLC_ENCODING_NRZB:               RegValue |= BIT13; break;
	case HDLC_ENCODING_NRZI_MARK:          RegValue |= BIT14; break;
	case HDLC_ENCODING_NRZI_SPACE:	       RegValue |= BIT14 | BIT13; break;
	case HDLC_ENCODING_BIPHASE_MARK:       RegValue |= BIT15; break;
	case HDLC_ENCODING_BIPHASE_SPACE:      RegValue |= BIT15 | BIT13; break;
	case HDLC_ENCODING_BIPHASE_LEVEL:      RegValue |= BIT15 | BIT14; break;
	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
	}

	if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
		RegValue |= BIT9;
	else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
		RegValue |= ( BIT12 | BIT10 | BIT9 );

	usc_OutReg( info, RMR, RegValue );

	/* Set the Receive count Limit Register (RCLR) to 0xffff. */
	/* When an opening flag of an SDLC frame is recognized the */
	/* Receive Character count (RCC) is loaded with the value in */
	/* RCLR. The RCC is decremented for each received byte. The */
	/* value of RCC is stored after the closing flag of the frame */
	/* allowing the frame size to be computed. */

	usc_OutReg( info, RCLR, RCLRVALUE );

	usc_RCmd( info, RCmd_SelectRicrdma_level );

	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO DMA Request Level
	 * <7>		0	Exited Hunt IA (Interrupt Arm)
	 * <6>		0	Idle Received IA
	 * <5>		0	Break/Abort IA
	 * <4>		0	Rx Bound IA
	 * <3>		1	Queued status reflects oldest 2 bytes in FIFO
	 * <2>		0	Abort/PE IA
	 * <1>		1	Rx Overrun IA
	 * <0>		0	Select TC0 value for readback
	 *
	 * 0000 0000 0000 1010 = 0x000a
	 * (written below as 0x030a: the 0x03 in <15..8> is the RxFIFO
	 * DMA request level selected by RCmd_SelectRicrdma_level above)
	 */

	/* Carry over the Exit Hunt and Idle Received bits */
	/* in case they have been armed by usc_ArmEvents.   */

	RegValue = usc_InReg( info, RICR ) & 0xc0;

	usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );

	/* Unlatch all Rx status bits and clear Rx status IRQ Pending */

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );

	/* Transmit mode Register (TMR)
	 *
	 * <15..13>	000	encoding
	 * <12..11>	00	FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
	 * <10>		1	1 = Start CRC as all 1s (use for SDLC/HDLC)
	 * <9>		0	1 = Tx CRC Enabled
	 * <8>		0	1 = Append CRC to end of transmit frame
	 * <7..6>	00	Transmit parity Even
	 * <5>		0	Transmit parity Disabled
	 * <4..2>	000	Tx Char Length = 8 bits
	 * <1..0>	00	Disable Transmitter
	 *
	 * 	0000 0100 0000 0000 = 0x0400
	 */

	RegValue = 0x0400;

	switch ( info->params.encoding ) {
	case HDLC_ENCODING_NRZB:               RegValue |= BIT13; break;
	case HDLC_ENCODING_NRZI_MARK:          RegValue |= BIT14; break;
	case HDLC_ENCODING_NRZI_SPACE:         RegValue |= BIT14 | BIT13; break;
	case HDLC_ENCODING_BIPHASE_MARK:       RegValue |= BIT15; break;
	case HDLC_ENCODING_BIPHASE_SPACE:      RegValue |= BIT15 | BIT13; break;
	case HDLC_ENCODING_BIPHASE_LEVEL:      RegValue |= BIT15 | BIT14; break;
	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
	}

	if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
		RegValue |= BIT9 | BIT8;
	else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
		RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);

	usc_OutReg( info, TMR, RegValue );

	usc_set_txidle( info );


	usc_TCmd( info, TCmd_SelectTicrdma_level );

	/* Transmit Interrupt Control Register (TICR)
	 *
	 * <15..8>	?	Transmit FIFO DMA Level
	 * <7>		0	Present IA (Interrupt Arm)
	 * <6>		0	Idle Sent IA
	 * <5>		1	Abort Sent IA
	 * <4>		1	EOF/EOM Sent IA
	 * <3>		0	CRC Sent IA
	 * <2>		1	1 = Wait for SW Trigger to Start Frame
	 * <1>		1	Tx Underrun IA
	 * <0>		0	TC0 constant on read back
	 *
	 *	0000 0000 0011 0110 = 0x0036
	 *	(written below as 0x0736: the 0x07 in <15..8> is the TxFIFO
	 *	DMA request level selected by TCmd_SelectTicrdma_level above)
	 */

	usc_OutReg( info, TICR, 0x0736 );

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

	/*
	** Transmit Command/Status Register (TCSR)
	**
	** <15..12>	0000	TCmd
	** <11> 	0/1	UnderWait
	** <10..08>	000	TxIdle
	** <7>		x	PreSent
	** <6>         	x	IdleSent
	** <5>         	x	AbortSent
	** <4>         	x	EOF/EOM Sent
	** <3>         	x	CRC Sent
	** <2>         	x	All Sent
	** <1>         	x	TxUnder
	** <0>         	x	TxEmpty
	**
	** 0000 0000 0000 0000 = 0x0000
	*/
	info->tcsr_value = 0;

	if ( !PreSL1660 )
		info->tcsr_value |= TCSR_UNDERWAIT;	/* idle on underrun (SL1660+) */

	usc_OutReg( info, TCSR, info->tcsr_value );

	/* Clock mode Control Register (CMCR)
	 *
	 * <15..14>	00	counter 1 Source = Disabled
	 * <13..12> 	00	counter 0 Source = Disabled
	 * <11..10> 	11	BRG1 Input is TxC Pin
	 * <9..8>	11	BRG0 Input is TxC Pin
	 * <7..6>	01	DPLL Input is BRG1 Output
	 * <5..3>	XXX	TxCLK comes from Port 0
	 * <2..0>   	XXX	RxCLK comes from Port 1
	 *
	 * 0000 1111 0111 0111 = 0x0f77
	 */

	RegValue = 0x0f40;	/* fixed upper bits; clock sources filled in below */

	if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
		RegValue |= 0x0003;	/* RxCLK from DPLL */
	else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
		RegValue |= 0x0004;	/* RxCLK from BRG0 */
	else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
		RegValue |= 0x0006;	/* RxCLK from TXC Input */
	else
		RegValue |= 0x0007;	/* RxCLK from Port1 */

	if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
		RegValue |= 0x0018;	/* TxCLK from DPLL */
	else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
		RegValue |= 0x0020;	/* TxCLK from BRG0 */
	else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
		RegValue |= 0x0038;	/* RxCLK from TXC Input */
	else
		RegValue |= 0x0030;	/* TxCLK from Port0 */

	usc_OutReg( info, CMCR, RegValue );


	/* Hardware Configuration Register (HCR)
	 *
	 * <15..14>	00	CTR0 Divisor:00=32,01=16,10=8,11=4
	 * <13>		0	CTR1DSel:0=CTR0Div determines CTR0Div
	 * <12>		0	CVOK:0=report code violation in biphase
	 * <11..10>	00	DPLL Divisor:00=32,01=16,10=8,11=4
	 * <9..8>	XX	DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
	 * <7..6>	00	reserved
	 * <5>		0	BRG1 mode:0=continuous,1=single cycle
	 * <4>		X	BRG1 Enable
	 * <3..2>	00	reserved
	 * <1>		0	BRG0 mode:0=continuous,1=single cycle
	 * <0>		0	BRG0 Enable
	 */

	RegValue = 0x0000;

	if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
		u32 XtalSpeed;
		u32 DpllDivisor;
		u16 Tc;

		/*  DPLL is enabled. Use BRG1 to provide continuous reference clock  */
		/*  for DPLL. DPLL mode in HCR is dependent on the encoding used. */

		XtalSpeed = 11059200;	/* adapter crystal frequency in Hz */

		if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
			DpllDivisor = 16;
			RegValue |= BIT10;
		}
		else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
			DpllDivisor = 8;
			RegValue |= BIT11;
		}
		else
			DpllDivisor = 32;

		/*  Tc = (Xtal/Speed) - 1 */
		/*  If twice the remainder of (Xtal/Speed) is greater than Speed */
		/*  then rounding up gives a more precise time constant. Instead */
		/*  of rounding up and then subtracting 1 we just don't subtract */
		/*  the one in this case. */

 		/*--------------------------------------------------
 		 * ejz: for DPLL mode, application should use the
 		 * same clock speed as the partner system, even
 		 * though clocking is derived from the input RxData.
 		 * In case the user uses a 0 for the clock speed,
 		 * default to 0xffffffff and don't try to divide by
 		 * zero
 		 *--------------------------------------------------*/
 		if ( info->params.clock_speed )
 		{
			Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
			if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
			       / info->params.clock_speed) )
				Tc--;
 		}
 		else
 			Tc = -1;	/* wraps to 0xffff in u16: slowest possible BRG1 rate */


		/* Write 16-bit Time Constant for BRG1 */
		usc_OutReg( info, TC1R, Tc );

		RegValue |= BIT4;		/* enable BRG1 */

		switch ( info->params.encoding ) {
		case HDLC_ENCODING_NRZ:
		case HDLC_ENCODING_NRZB:
		case HDLC_ENCODING_NRZI_MARK:
		case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
		case HDLC_ENCODING_BIPHASE_MARK:
		case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
		case HDLC_ENCODING_BIPHASE_LEVEL:
		case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
		}
	}

	usc_OutReg( info, HCR, RegValue );


	/* Channel Control/status Register (CCSR)
	 *
	 * <15>		X	RCC FIFO Overflow status (RO)
	 * <14>		X	RCC FIFO Not Empty status (RO)
	 * <13>		0	1 = Clear RCC FIFO (WO)
	 * <12>		X	DPLL Sync (RW)
	 * <11>		X	DPLL 2 Missed Clocks status (RO)
	 * <10>		X	DPLL 1 Missed Clock status (RO)
	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
	 * <7>		X	SDLC Loop On status (RO)
	 * <6>		X	SDLC Loop Send status (RO)
	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
	 * <4..2>	000	Last Char of SDLC frame has 8 bits (RW)
	 * <1..0>	00	reserved
	 *
	 * 0000 0000 0010 0000 = 0x0020
	 * (NOTE(review): written below as 0x1020, i.e. with BIT12/DPLL Sync
	 * also set — presumably intentional to write the DPLL Sync RW bit;
	 * confirm against the IUSC manual)
	 */

	usc_OutReg( info, CCSR, 0x1020 );


	if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
		usc_OutReg( info, SICR,
			    (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
	}


	/* enable Master Interrupt Enable bit (MIE) */
	usc_EnableMasterIrqBit( info );

	usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
				TRANSMIT_STATUS | TRANSMIT_DATA | MISC);

	/* arm RCC underflow interrupt */
	usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
	usc_EnableInterrupts(info, MISC);

	/* Reset the DMA controller with Master Bus Enable cleared, then
	 * re-enable it; mbre_bit is cached so the register helpers preserve
	 * the MBRE state on subsequent DCAR accesses. */
	info->mbre_bit = 0;
	outw( 0, info->io_base );			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */
	info->mbre_bit = BIT8;
	outw( BIT8, info->io_base );			/* set Master Bus Enable (DCAR) */

	/* DMA Control Register (DCR)
	 *
	 * <15..14>	10	Priority mode = Alternating Tx/Rx
	 *		01	Rx has priority
	 *		00	Tx has priority
	 *
	 * <13>		1	Enable Priority Preempt per DCR<15..14>
	 *			(WARNING DCR<11..10> must be 00 when this is 1)
	 *		0	Choose activate channel per DCR<11..10>
	 *
	 * <12>		0	Little Endian for Array/List
	 * <11..10>	00	Both Channels can use each bus grant
	 * <9..6>	0000	reserved
	 * <5>		0	7 CLK - Minimum Bus Re-request Interval
	 * <4>		0	1 = drive D/C and S/D pins
	 * <3>		1	1 = Add one wait state to all DMA cycles.
	 * <2>		0	1 = Strobe /UAS on every transfer.
	 * <1..0>	11	Addr incrementing only affects LS24 bits
	 *
	 * 0110 0000 0000 1011 = 0x600b
	 */

	/* PCI adapter does not need DMA wait state */
	usc_OutDmaReg( info, DCR, 0xa00b );

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in Arrary/List entry
	 * <12>		1	Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1111 0010 0000 0000 = 0xf200
	 */

	usc_OutDmaReg( info, RDMR, 0xf200 );


	/* Transmit DMA mode Register (TDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	TCBinA/L = fetch Tx Control Block from List entry
	 * <12>		1	Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on end of frame
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (Read Only so write as 0)
	 *
	 * 1111 0010 0000 0000 = 0xf200
	 */

	usc_OutDmaReg( info, TDMR, 0xf200 );


	/* DMA Interrupt Control Register (DICR)
	 *
	 * <15>		1	DMA Interrupt Enable
	 * <14>		0	1 = Disable IEO from USC
	 * <13>		0	1 = Don't provide vector during IntAck
	 * <12>		1	1 = Include status in Vector
	 * <10..2>	0	reserved, Must be 0s
	 * <1>		0	1 = Rx DMA Interrupt Enabled
	 * <0>		0	1 = Tx DMA Interrupt Enabled
	 *
	 * 1001 0000 0000 0000 = 0x9000
	 */

	usc_OutDmaReg( info, DICR, 0x9000 );

	usc_InDmaReg( info, RDMR );		/* clear pending receive DMA IRQ bits */
	usc_InDmaReg( info, TDMR );		/* clear pending transmit DMA IRQ bits */
	usc_OutDmaReg( info, CDIR, 0x0303 );	/* clear IUS and Pending for Tx and Rx */

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	10	Use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length
	 * <9..8>	00	Preamble Pattern
	 * <7..6>	10	Use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 1000 0000 1000 0000 = 0x8080
	 */

	RegValue = 0x8080;

	switch ( info->params.preamble_length ) {
	case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
	case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
	case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
	}

	switch ( info->params.preamble ) {
	case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
	case HDLC_PREAMBLE_PATTERN_ONES:  RegValue |= BIT8; break;
	case HDLC_PREAMBLE_PATTERN_10:    RegValue |= BIT9; break;
	case HDLC_PREAMBLE_PATTERN_01:    RegValue |= BIT9 | BIT8; break;
	}

	usc_OutReg( info, CCR, RegValue );


	/*
	 * Burst/Dwell Control Register
	 *
	 * <15..8>	0x20	Maximum number of transfers per bus grant
	 * <7..0>	0x00	Maximum number of clock cycles per bus grant
	 */

	/* don't limit bus occupancy on PCI adapter */
	usc_OutDmaReg( info, BDCR, 0x0000 );

	/* leave Tx and Rx disabled until explicitly started */
	usc_stop_transmitter(info);
	usc_stop_receiver(info);

} /* end of usc_set_sdlc_mode() */
5014*4882a593Smuzhiyun
5015*4882a593Smuzhiyun /* usc_enable_loopback()
5016*4882a593Smuzhiyun *
5017*4882a593Smuzhiyun * Set the 16C32 for internal loopback mode.
5018*4882a593Smuzhiyun * The TxCLK and RxCLK signals are generated from the BRG0 and
5019*4882a593Smuzhiyun * the TxD is looped back to the RxD internally.
5020*4882a593Smuzhiyun *
5021*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5022*4882a593Smuzhiyun * enable 1 = enable loopback, 0 = disable
5023*4882a593Smuzhiyun * Return Value: None
5024*4882a593Smuzhiyun */
static void usc_enable_loopback(struct mgsl_struct *info, int enable)
{
	if (enable) {
		/* blank external TXD output so looped-back traffic is not driven off chip */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));

		/* Clock mode Control Register (CMCR)
		 *
		 * <15..14>     00      counter 1 Disabled
		 * <13..12>     00      counter 0 Disabled
		 * <11..10>     11      BRG1 Input is TxC Pin
		 * <9..8>       11      BRG0 Input is TxC Pin
		 * <7..6>       01      DPLL Input is BRG1 Output
		 * <5..3>       100     TxCLK comes from BRG0
		 * <2..0>       100     RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );

		/* Write 16-bit Time Constant for BRG0 */
		/* use clock speed if available, otherwise use 8 for diagnostics */
		/* (11059200 = 11.0592 MHz reference; time constant = (ref/speed) - 1) */
		if (info->params.clock_speed) {
			usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
		} else
			usc_OutReg(info, TC0R, (u16)8);

		/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
		   mode = Continuous Set Bit 0 to enable BRG0.  */
		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));

		/* set Internal Data loopback mode (written directly to CCAR port) */
		info->loopback_bits = 0x300;
		outw( 0x0300, info->io_base + CCAR );
	} else {
		/* enable external TXD output */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));

		/* clear Internal Data loopback mode */
		info->loopback_bits = 0;
		outw( 0,info->io_base + CCAR );
	}

} /* end of usc_enable_loopback() */
5073*4882a593Smuzhiyun
5074*4882a593Smuzhiyun /* usc_enable_aux_clock()
5075*4882a593Smuzhiyun *
 * Enable the AUX clock output at the specified frequency.
5077*4882a593Smuzhiyun *
5078*4882a593Smuzhiyun * Arguments:
5079*4882a593Smuzhiyun *
5080*4882a593Smuzhiyun * info pointer to device extension
5081*4882a593Smuzhiyun * data_rate data rate of clock in bits per second
5082*4882a593Smuzhiyun * A data rate of 0 disables the AUX clock.
5083*4882a593Smuzhiyun *
5084*4882a593Smuzhiyun * Return Value: None
5085*4882a593Smuzhiyun */
static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
{
	u32 xtal_speed;
	u32 remainder;
	u16 time_const;

	if ( !data_rate ) {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
		return;
	}

	/* 11.0592 MHz reference clock feeding BRG0 */
	xtal_speed = 11059200;

	/*
	 * Time constant is (Xtal/Speed) - 1, rounded to the nearest
	 * integer: if the remainder of Xtal/Speed is at least half of
	 * Speed, rounding up would cancel the -1, so in that case the
	 * decrement is simply skipped.
	 */
	time_const = (u16)(xtal_speed / data_rate);
	remainder = xtal_speed % data_rate;
	if ( (remainder * 2) / data_rate == 0 )
		time_const--;

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, time_const );

	/*
	 * Hardware Configuration Register (HCR)
	 * Clear Bit 1, BRG0 mode = Continuous
	 * Set Bit 0 to enable BRG0.
	 */
	usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

	/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
	usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );

} /* end of usc_enable_aux_clock() */
5125*4882a593Smuzhiyun
5126*4882a593Smuzhiyun /*
5127*4882a593Smuzhiyun *
5128*4882a593Smuzhiyun * usc_process_rxoverrun_sync()
5129*4882a593Smuzhiyun *
5130*4882a593Smuzhiyun * This function processes a receive overrun by resetting the
5131*4882a593Smuzhiyun * receive DMA buffers and issuing a Purge Rx FIFO command
5132*4882a593Smuzhiyun * to allow the receiver to continue receiving.
5133*4882a593Smuzhiyun *
5134*4882a593Smuzhiyun * Arguments:
5135*4882a593Smuzhiyun *
5136*4882a593Smuzhiyun * info pointer to device extension
5137*4882a593Smuzhiyun *
5138*4882a593Smuzhiyun * Return Value: None
5139*4882a593Smuzhiyun */
static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
{
	int start_index;
	int end_index;
	int frame_start_index;
	bool start_of_frame_found = false;
	bool end_of_frame_found = false;
	bool reprogram_dma = false;

	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
	u32 phys_addr;

	/* pause receive DMA and flush the receiver before inspecting the ring */
	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
	usc_RCmd( info, RCmd_EnterHuntmode );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* CurrentRxBuffer points to the 1st buffer of the next */
	/* possibly available receive frame. */

	frame_start_index = start_index = end_index = info->current_rx_buffer;

	/* Search for an unfinished string of buffers. This means */
	/* that a receive frame started (at least one buffer with */
	/* count set to zero) but there is no terminating buffer */
	/* (status set to non-zero). */

	while( !buffer_list[end_index].count )
	{
		/* Count field has been reset to zero by 16C32. */
		/* This buffer is currently in use. */

		if ( !start_of_frame_found )
		{
			start_of_frame_found = true;
			frame_start_index = end_index;
			end_of_frame_found = false;
		}

		if ( buffer_list[end_index].status )
		{
			/* Status field has been set by 16C32. */
			/* This is the last buffer of a received frame. */

			/* We want to leave the buffers for this frame intact. */
			/* Move on to next possible frame. */

			start_of_frame_found = false;
			end_of_frame_found = true;
		}

		/* advance to next buffer entry in linked list */
		end_index++;
		if ( end_index == info->rx_buffer_count )
			end_index = 0;

		if ( start_index == end_index )
		{
			/* The entire list has been searched with all Counts == 0 and */
			/* all Status == 0. The receive buffers are */
			/* completely screwed, reset all receive buffers! */
			mgsl_reset_rx_dma_buffers( info );
			frame_start_index = 0;
			start_of_frame_found = false;
			reprogram_dma = true;
			break;
		}
	}

	if ( start_of_frame_found && !end_of_frame_found )
	{
		/* There is an unfinished string of receive DMA buffers */
		/* as a result of the receiver overrun. */

		/* Reset the buffers for the unfinished frame */
		/* and reprogram the receive DMA controller to start */
		/* at the 1st buffer of unfinished frame. */

		start_index = frame_start_index;

		do
		{
			/* NOTE(review): 32-bit store through an aliasing cast; this
			 * presumably resets both the 16-bit count field and the
			 * adjacent status field in a single write — confirm against
			 * the DMABUFFERENTRY layout. */
			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;

			/* Adjust index for wrap around. */
			if ( start_index == info->rx_buffer_count )
				start_index = 0;

		} while( start_index != end_index );

		reprogram_dma = true;
	}

	if ( reprogram_dma )
	{
		/* clear latched Rx status, pending IRQs, and disable the receiver
		 * while the DMA channel is reprogrammed */
		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);

		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}
	else
	{
		/* no reprogramming needed: just flush and resume */
		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
		usc_RTCmd( info, RTCmd_PurgeRxFifo );
	}

} /* end of usc_process_rxoverrun_sync() */
5271*4882a593Smuzhiyun
5272*4882a593Smuzhiyun /* usc_stop_receiver()
5273*4882a593Smuzhiyun *
5274*4882a593Smuzhiyun * Disable USC receiver
5275*4882a593Smuzhiyun *
5276*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5277*4882a593Smuzhiyun * Return Value: None
5278*4882a593Smuzhiyun */
static void usc_stop_receiver( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* Disable receive DMA channel. */
	/* This also disables receive DMA channel interrupts */
	usc_DmaCmd( info, DmaCmd_ResetRxChannel );

	/* clear any latched receive status and pending receive interrupts,
	 * then mask further receive interrupts */
	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
	usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );

	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* reflect the stopped state in the driver's bookkeeping */
	info->rx_enabled = false;
	info->rx_overflow = false;
	info->rx_rcc_underrun = false;

} /* end of usc_stop_receiver() */
5304*4882a593Smuzhiyun
5305*4882a593Smuzhiyun /* usc_start_receiver()
5306*4882a593Smuzhiyun *
5307*4882a593Smuzhiyun * Enable the USC receiver
5308*4882a593Smuzhiyun *
5309*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5310*4882a593Smuzhiyun * Return Value: None
5311*4882a593Smuzhiyun */
static void usc_start_receiver( struct mgsl_struct *info )
{
	u32 phys_addr;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* start from a clean slate: reset the DMA buffer ring and make sure
	 * the receiver is fully stopped before reprogramming it */
	mgsl_reset_rx_dma_buffers( info );
	usc_stop_receiver( info );

	/* empty the receive FIFO and load the RCC with RCLR (BIT13 of CCSR) */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* DMA mode Transfers */
		/* Program the DMA controller. */
		/* Enable the DMA controller end of buffer interrupt. */

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[0].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	} else {
		/* non-DMA (e.g. async) mode: interrupt per received character */
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
		usc_EnableInterrupts(info, RECEIVE_DATA);

		usc_RTCmd( info, RTCmd_PurgeRxFifo );
		usc_RCmd( info, RCmd_EnterHuntmode );

		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}

	usc_OutReg( info, CCSR, 0x1020 );

	info->rx_enabled = true;

} /* end of usc_start_receiver() */
5367*4882a593Smuzhiyun
5368*4882a593Smuzhiyun /* usc_start_transmitter()
5369*4882a593Smuzhiyun *
5370*4882a593Smuzhiyun * Enable the USC transmitter and send a transmit frame if
5371*4882a593Smuzhiyun * one is loaded in the DMA buffers.
5372*4882a593Smuzhiyun *
5373*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5374*4882a593Smuzhiyun * Return Value: None
5375*4882a593Smuzhiyun */
static void usc_start_transmitter( struct mgsl_struct *info )
{
	u32 phys_addr;
	unsigned int FrameSize;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if ( info->xmit_cnt ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes. */

		info->drop_rts_on_tx_done = false;

		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
			usc_get_serial_signals( info );
			if ( !(info->serial_signals & SerialSignal_RTS) ) {
				info->serial_signals |= SerialSignal_RTS;
				usc_set_serial_signals( info );
				info->drop_rts_on_tx_done = true;
			}
		}


		if ( info->params.mode == MGSL_MODE_ASYNC ) {
			/* async mode: interrupt-driven, fill the TX FIFO directly */
			if ( !info->tx_active ) {
				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
				/* NOTE(review): '+' is used to combine the flag values here;
				 * this is equivalent to '|' only if the bits are disjoint */
				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
				usc_EnableInterrupts(info, TRANSMIT_DATA);
				usc_load_txfifo(info);
			}
		} else {
			/* Disable transmit DMA controller while programming. */
			usc_DmaCmd( info, DmaCmd_ResetTxChannel );

			/* Transmit DMA buffer is loaded, so program USC */
			/* to send the frame contained in the buffers.	*/

			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

			/* if operating in Raw sync mode, reset the rcc component
			 * of the tx dma buffer entry, otherwise, the serial controller
			 * will send a closing sync char after this count.
			 */
			if ( info->params.mode == MGSL_MODE_RAW )
				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

			/* Program the Transmit Character Length Register (TCLR) */
			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
			usc_OutReg( info, TCLR, (u16)FrameSize );

			usc_RTCmd( info, RTCmd_PurgeTxFifo );

			/* Program the address of the 1st DMA Buffer Entry in linked list */
			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
			usc_EnableInterrupts( info, TRANSMIT_STATUS );

			if ( info->params.mode == MGSL_MODE_RAW &&
					info->num_tx_dma_buffers > 1 ) {
			   /* When running external sync mode, attempt to 'stream' transmit  */
			   /* by filling tx dma buffers as they become available. To do this */
			   /* we need to enable Tx DMA EOB Status interrupts :               */
			   /*                                                                */
			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */

			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
			}

			/* Initialize Transmit DMA Channel */
			usc_DmaCmd( info, DmaCmd_InitTxChannel );

			usc_TCmd( info, TCmd_SendFrame );

			/* watchdog: declare the transmit dead if it does not
			 * complete within 5 seconds */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		info->tx_active = true;
	}

	if ( !info->tx_enabled ) {
		info->tx_enabled = true;
		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
		else
			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
	}

} /* end of usc_start_transmitter() */
5474*4882a593Smuzhiyun
5475*4882a593Smuzhiyun /* usc_stop_transmitter()
5476*4882a593Smuzhiyun *
5477*4882a593Smuzhiyun * Stops the transmitter and DMA
5478*4882a593Smuzhiyun *
 * Arguments:		info	pointer to device instance data
5480*4882a593Smuzhiyun * Return Value: None
5481*4882a593Smuzhiyun */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* cancel the transmit-timeout watchdog armed by usc_start_transmitter() */
	del_timer(&info->tx_timer);

	/* clear latched TX status and pending TX interrupts, then mask them.
	 * NOTE(review): '+' combines the flag values; equivalent to '|' only
	 * if the bits are disjoint */
	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

	/* disable the transmitter, reset its DMA channel, and flush the FIFO */
	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	info->tx_enabled = false;
	info->tx_active = false;

} /* end of usc_stop_transmitter() */
5502*4882a593Smuzhiyun
5503*4882a593Smuzhiyun /* usc_load_txfifo()
5504*4882a593Smuzhiyun *
5505*4882a593Smuzhiyun * Fill the transmit FIFO until the FIFO is full or
5506*4882a593Smuzhiyun * there is no more data to load.
5507*4882a593Smuzhiyun *
5508*4882a593Smuzhiyun * Arguments: info pointer to device extension (instance data)
5509*4882a593Smuzhiyun * Return Value: None
5510*4882a593Smuzhiyun */
static void usc_load_txfifo( struct mgsl_struct *info )
{
	int Fifocount;
	u8 TwoBytes[2];

	/* nothing to send: no buffered data and no pending high-priority char */
	if ( !info->xmit_cnt && !info->x_char )
		return;

	/* Select transmit FIFO status readback in TICR */
	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
			/* write a 16-bit word from transmit buffer to 16C32 */

			/* pull two bytes from the circular transmit buffer,
			 * wrapping the tail index at SERIAL_XMIT_SIZE (power of 2) */
			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

			/* NOTE(review): u8[2] -> u16 pun; byte order of the 16-bit
			 * port write presumably matches the little-endian ordering
			 * selected via RTCmd_SelectLittleEndian — confirm */
			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

			info->xmit_cnt -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			/* point CCAR at the TDR register, LSB-only access */
			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
				info->io_base + CCAR );

			if (info->x_char) {
				/* transmit pending high priority char */
				outw( info->x_char,info->io_base + CCAR );
				info->x_char = 0;
			} else {
				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
				info->xmit_cnt--;
			}
			info->icount.tx++;
		}
	}

} /* end of usc_load_txfifo() */
5560*4882a593Smuzhiyun
5561*4882a593Smuzhiyun /* usc_reset()
5562*4882a593Smuzhiyun *
5563*4882a593Smuzhiyun * Reset the adapter to a known state and prepare it for further use.
5564*4882a593Smuzhiyun *
5565*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5566*4882a593Smuzhiyun * Return Value: None
5567*4882a593Smuzhiyun */
static void usc_reset( struct mgsl_struct *info )
{
	int i;
	u32 readval;

	/* Set BIT30 of Misc Control Register */
	/* (Local Control Register 0x50) to force reset of USC. */

	/* memory-mapped local-control registers; MiscCtrl is volatile so the
	 * delay reads below are not optimized away */
	volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
	u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

	info->misc_ctrl_value |= BIT30;
	*MiscCtrl = info->misc_ctrl_value;

	/*
	 * Force at least 170ns delay before clearing reset bit. Each read from
	 * LCR takes at least 30ns so 10 times for 300ns to be safe.
	 */
	for(i=0;i<10;i++)
		readval = *MiscCtrl;

	/* release the USC from reset */
	info->misc_ctrl_value &= ~BIT30;
	*MiscCtrl = info->misc_ctrl_value;

	/* program local-bus read/write strobe timing */
	*LCR0BRDR = BUS_DESCRIPTOR(
		1,	// Write Strobe Hold (0-3)
		2,	// Write Strobe Delay (0-3)
		2,	// Read Strobe Delay  (0-3)
		0,	// NWDD (Write data-data) (0-3)
		4,	// NWAD (Write Addr-data) (0-31)
		0,	// NXDA (Read/Write Data-Addr) (0-3)
		0,	// NRDD (Read Data-Data) (0-3)
		5	// NRAD (Read Addr-Data) (0-31)
		);

	/* clear driver-side shadow state that the hardware reset invalidated */
	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 * 1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

} /* end of usc_reset() */
5668*4882a593Smuzhiyun
5669*4882a593Smuzhiyun /* usc_set_async_mode()
5670*4882a593Smuzhiyun *
5671*4882a593Smuzhiyun * Program adapter for asynchronous communications.
5672*4882a593Smuzhiyun *
5673*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5674*4882a593Smuzhiyun * Return Value: None
5675*4882a593Smuzhiyun */
static void usc_set_async_mode( struct mgsl_struct *info )
{
	u16 RegValue;

	/* disable interrupts while programming USC */
	usc_DisableMasterIrqBit( info );

	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */

	/* loop back a dummy SDLC frame to flush stale sync-mode status
	 * left in the USC (see usc_loopback_frame for why)
	 */
	usc_loopback_frame( info );

	/* Channel mode Register (CMR)
	 *
	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
	 * <13..12>	00	00 = 16X Clock
	 * <11..8>	0000	Transmitter mode = Asynchronous
	 * <7..6>	00	reserved?
	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
	 * <3..0>	0000	Receiver mode = Asynchronous
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;
	if ( info->params.stop_bits != 1 )
		RegValue |= BIT14;	/* <15..14> = 01, 2 stop bits */
	usc_OutReg( info, CMR, RegValue );


	/* Receiver mode Register (RMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Even parity
	 * <5>		0	parity disabled
	 * <4..2>	000	Receive Char Length = 8 bits
	 * <1..0>	00	Disable Receiver
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	/* any non-8-bit setting selects the alternate char length
	 * (<4..2> = 111 -- presumably 7 data bits per the 16C32 RMR
	 * encoding; TODO confirm against the IUSC manual)
	 */
	if ( info->params.data_bits != 8 )
		RegValue |= BIT4 | BIT3 | BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;		/* parity enable */
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;	/* even parity */
	}

	usc_OutReg( info, RMR, RegValue );


	/* Set IRQ trigger level */

	usc_RCmd( info, RCmd_SelectRicrIntLevel );


	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO IRQ Request Level
	 *
	 * Note: For async mode the receive FIFO level must be set
	 * to 0 to avoid the situation where the FIFO contains fewer bytes
	 * than the trigger level and no more data is expected.
	 *
	 * <7>		0	Exited Hunt IA (Interrupt Arm)
	 * <6>		0	Idle Received IA
	 * <5>		0	Break/Abort IA
	 * <4>		0	Rx Bound IA
	 * <3>		0	Queued status reflects oldest byte in FIFO
	 * <2>		0	Abort/PE IA
	 * <1>		0	Rx Overrun IA
	 * <0>		0	Select TC0 value for readback
	 *
	 * 0000 0000 0000 0000 = 0x0000 (FIFO trigger level 0 in MSB)
	 */

	usc_OutReg( info, RICR, 0x0000 );

	/* discard any latched receive status before enabling */
	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );


	/* Transmit mode Register (TMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Transmit parity Even
	 * <5>		0	Transmit parity Disabled
	 * <4..2>	000	Tx Char Length = 8 bits
	 * <1..0>	00	Disable Transmitter
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	/* mirror the receiver's char length / parity setup */
	if ( info->params.data_bits != 8 )
		RegValue |= BIT4 | BIT3 | BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, TMR, RegValue );

	usc_set_txidle( info );


	/* Set IRQ trigger level */

	usc_TCmd( info, TCmd_SelectTicrIntLevel );


	/* Transmit Interrupt Control Register (TICR)
	 *
	 * <15..8>	?	Transmit FIFO IRQ Level
	 * <7>		0	Present IA (Interrupt Arm)
	 * <6>		1	Idle Sent IA
	 * <5>		0	Abort Sent IA
	 * <4>		0	EOF/EOM Sent IA
	 * <3>		0	CRC Sent IA
	 * <2>		0	1 = Wait for SW Trigger to Start Frame
	 * <1>		0	Tx Underrun IA
	 * <0>		0	TC0 constant on read back
	 *
	 * 0001 1111 0100 0000 = 0x1f40
	 * (0x0040 low byte, Tx FIFO IRQ level 0x1f in MSB)
	 */

	usc_OutReg( info, TICR, 0x1f40 );

	/* discard any latched transmit status before enabling */
	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

	/* program BRG0 to generate the async bit clock */
	usc_enable_async_clock( info, info->params.data_rate );


	/* Channel Control/status Register (CCSR)
	 *
	 * <15>		X	RCC FIFO Overflow status (RO)
	 * <14>		X	RCC FIFO Not Empty status (RO)
	 * <13>		0	1 = Clear RCC FIFO (WO)
	 * <12>		X	DPLL in Sync status (RO)
	 * <11>		X	DPLL 2 Missed Clocks status (RO)
	 * <10>		X	DPLL 1 Missed Clock status (RO)
	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
	 * <7>		X	SDLC Loop On status (RO)
	 * <6>		X	SDLC Loop Send status (RO)
	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
	 * <4..2>	000	Last Char of SDLC frame has 8 bits (RW)
	 * <1..0>	00	reserved
	 *
	 * 0000 0000 0010 0000 = 0x0020
	 */

	usc_OutReg( info, CCSR, 0x0020 );

	/* start with all channel interrupt sources disabled and clear;
	 * they are enabled individually when the port is started
	 */
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableMasterIrqBit( info );

	if (info->params.loopback) {
		/* internal loopback: remember the CCAR loopback bits so
		 * other register writes can preserve them
		 */
		info->loopback_bits = 0x300;
		outw(0x0300, info->io_base + CCAR);
	}

} /* end of usc_set_async_mode() */
5853*4882a593Smuzhiyun
5854*4882a593Smuzhiyun /* usc_loopback_frame()
5855*4882a593Smuzhiyun *
5856*4882a593Smuzhiyun * Loop back a small (2 byte) dummy SDLC frame.
5857*4882a593Smuzhiyun * Interrupts and DMA are NOT used. The purpose of this is to
5858*4882a593Smuzhiyun * clear any 'stale' status info left over from running in async mode.
5859*4882a593Smuzhiyun *
5860*4882a593Smuzhiyun * The 16C32 shows the strange behaviour of marking the 1st
5861*4882a593Smuzhiyun * received SDLC frame with a CRC error even when there is no
 * CRC error. To get around this a small dummy frame of 2 bytes
5863*4882a593Smuzhiyun * is looped back when switching from async to sync mode.
5864*4882a593Smuzhiyun *
5865*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5866*4882a593Smuzhiyun * Return Value: None
5867*4882a593Smuzhiyun */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	/* remember caller's mode; this routine temporarily forces HDLC */
	unsigned long oldmode = info->params.mode;

	info->params.mode = MGSL_MODE_HDLC;

	/* no interrupts while the dummy frame is looped back */
	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel. */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE
	 * Bounded busy poll (no delay) on RCSR; exits early when any of
	 * bits 8/4/3/1 latches -- presumably the receive-done/error
	 * status bits (TODO confirm bit names against the IUSC manual).
	 * If none latch within 1000 reads we give up silently.
	 */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	/* restore the caller's operating mode */
	info->params.mode = oldmode;

} /* end of usc_loopback_frame() */
5930*4882a593Smuzhiyun
5931*4882a593Smuzhiyun /* usc_set_sync_mode() Programs the USC for SDLC communications.
5932*4882a593Smuzhiyun *
5933*4882a593Smuzhiyun * Arguments: info pointer to adapter info structure
5934*4882a593Smuzhiyun * Return Value: None
5935*4882a593Smuzhiyun */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
	/* flush stale status from a previous (async) mode first,
	 * then program the USC for SDLC operation
	 */
	usc_loopback_frame( info );
	usc_set_sdlc_mode( info );

	/* drive the AUX clock at the configured sync clock speed */
	usc_enable_aux_clock(info, info->params.clock_speed);

	if (info->params.loopback)
		usc_enable_loopback(info,1);

} /* end of usc_set_sync_mode() */
5947*4882a593Smuzhiyun
5948*4882a593Smuzhiyun /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
5949*4882a593Smuzhiyun *
5950*4882a593Smuzhiyun * Arguments: info pointer to device instance data
5951*4882a593Smuzhiyun * Return Value: None
5952*4882a593Smuzhiyun */
usc_set_txidle(struct mgsl_struct * info)5953*4882a593Smuzhiyun static void usc_set_txidle( struct mgsl_struct *info )
5954*4882a593Smuzhiyun {
5955*4882a593Smuzhiyun u16 usc_idle_mode = IDLEMODE_FLAGS;
5956*4882a593Smuzhiyun
5957*4882a593Smuzhiyun /* Map API idle mode to USC register bits */
5958*4882a593Smuzhiyun
5959*4882a593Smuzhiyun switch( info->idle_mode ){
5960*4882a593Smuzhiyun case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
5961*4882a593Smuzhiyun case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
5962*4882a593Smuzhiyun case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
5963*4882a593Smuzhiyun case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
5964*4882a593Smuzhiyun case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
5965*4882a593Smuzhiyun case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
5966*4882a593Smuzhiyun case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
5967*4882a593Smuzhiyun }
5968*4882a593Smuzhiyun
5969*4882a593Smuzhiyun info->usc_idle_mode = usc_idle_mode;
5970*4882a593Smuzhiyun //usc_OutReg(info, TCSR, usc_idle_mode);
5971*4882a593Smuzhiyun info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
5972*4882a593Smuzhiyun info->tcsr_value += usc_idle_mode;
5973*4882a593Smuzhiyun usc_OutReg(info, TCSR, info->tcsr_value);
5974*4882a593Smuzhiyun
5975*4882a593Smuzhiyun /*
5976*4882a593Smuzhiyun * if SyncLink WAN adapter is running in external sync mode, the
5977*4882a593Smuzhiyun * transmitter has been set to Monosync in order to try to mimic
5978*4882a593Smuzhiyun * a true raw outbound bit stream. Monosync still sends an open/close
5979*4882a593Smuzhiyun * sync char at the start/end of a frame. Try to match those sync
5980*4882a593Smuzhiyun * patterns to the idle mode set here
5981*4882a593Smuzhiyun */
5982*4882a593Smuzhiyun if ( info->params.mode == MGSL_MODE_RAW ) {
5983*4882a593Smuzhiyun unsigned char syncpat = 0;
5984*4882a593Smuzhiyun switch( info->idle_mode ) {
5985*4882a593Smuzhiyun case HDLC_TXIDLE_FLAGS:
5986*4882a593Smuzhiyun syncpat = 0x7e;
5987*4882a593Smuzhiyun break;
5988*4882a593Smuzhiyun case HDLC_TXIDLE_ALT_ZEROS_ONES:
5989*4882a593Smuzhiyun syncpat = 0x55;
5990*4882a593Smuzhiyun break;
5991*4882a593Smuzhiyun case HDLC_TXIDLE_ZEROS:
5992*4882a593Smuzhiyun case HDLC_TXIDLE_SPACE:
5993*4882a593Smuzhiyun syncpat = 0x00;
5994*4882a593Smuzhiyun break;
5995*4882a593Smuzhiyun case HDLC_TXIDLE_ONES:
5996*4882a593Smuzhiyun case HDLC_TXIDLE_MARK:
5997*4882a593Smuzhiyun syncpat = 0xff;
5998*4882a593Smuzhiyun break;
5999*4882a593Smuzhiyun case HDLC_TXIDLE_ALT_MARK_SPACE:
6000*4882a593Smuzhiyun syncpat = 0xaa;
6001*4882a593Smuzhiyun break;
6002*4882a593Smuzhiyun }
6003*4882a593Smuzhiyun
6004*4882a593Smuzhiyun usc_SetTransmitSyncChars(info,syncpat,syncpat);
6005*4882a593Smuzhiyun }
6006*4882a593Smuzhiyun
6007*4882a593Smuzhiyun } /* end of usc_set_txidle() */
6008*4882a593Smuzhiyun
6009*4882a593Smuzhiyun /* usc_get_serial_signals()
6010*4882a593Smuzhiyun *
6011*4882a593Smuzhiyun * Query the adapter for the state of the V24 status (input) signals.
6012*4882a593Smuzhiyun *
6013*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6014*4882a593Smuzhiyun * Return Value: None
6015*4882a593Smuzhiyun */
usc_get_serial_signals(struct mgsl_struct * info)6016*4882a593Smuzhiyun static void usc_get_serial_signals( struct mgsl_struct *info )
6017*4882a593Smuzhiyun {
6018*4882a593Smuzhiyun u16 status;
6019*4882a593Smuzhiyun
6020*4882a593Smuzhiyun /* clear all serial signals except RTS and DTR */
6021*4882a593Smuzhiyun info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6022*4882a593Smuzhiyun
6023*4882a593Smuzhiyun /* Read the Misc Interrupt status Register (MISR) to get */
6024*4882a593Smuzhiyun /* the V24 status signals. */
6025*4882a593Smuzhiyun
6026*4882a593Smuzhiyun status = usc_InReg( info, MISR );
6027*4882a593Smuzhiyun
6028*4882a593Smuzhiyun /* set serial signal bits to reflect MISR */
6029*4882a593Smuzhiyun
6030*4882a593Smuzhiyun if ( status & MISCSTATUS_CTS )
6031*4882a593Smuzhiyun info->serial_signals |= SerialSignal_CTS;
6032*4882a593Smuzhiyun
6033*4882a593Smuzhiyun if ( status & MISCSTATUS_DCD )
6034*4882a593Smuzhiyun info->serial_signals |= SerialSignal_DCD;
6035*4882a593Smuzhiyun
6036*4882a593Smuzhiyun if ( status & MISCSTATUS_RI )
6037*4882a593Smuzhiyun info->serial_signals |= SerialSignal_RI;
6038*4882a593Smuzhiyun
6039*4882a593Smuzhiyun if ( status & MISCSTATUS_DSR )
6040*4882a593Smuzhiyun info->serial_signals |= SerialSignal_DSR;
6041*4882a593Smuzhiyun
6042*4882a593Smuzhiyun } /* end of usc_get_serial_signals() */
6043*4882a593Smuzhiyun
6044*4882a593Smuzhiyun /* usc_set_serial_signals()
6045*4882a593Smuzhiyun *
6046*4882a593Smuzhiyun * Set the state of RTS and DTR based on contents of
6047*4882a593Smuzhiyun * serial_signals member of device extension.
6048*4882a593Smuzhiyun *
6049*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6050*4882a593Smuzhiyun * Return Value: None
6051*4882a593Smuzhiyun */
usc_set_serial_signals(struct mgsl_struct * info)6052*4882a593Smuzhiyun static void usc_set_serial_signals( struct mgsl_struct *info )
6053*4882a593Smuzhiyun {
6054*4882a593Smuzhiyun u16 Control;
6055*4882a593Smuzhiyun unsigned char V24Out = info->serial_signals;
6056*4882a593Smuzhiyun
6057*4882a593Smuzhiyun /* get the current value of the Port Control Register (PCR) */
6058*4882a593Smuzhiyun
6059*4882a593Smuzhiyun Control = usc_InReg( info, PCR );
6060*4882a593Smuzhiyun
6061*4882a593Smuzhiyun if ( V24Out & SerialSignal_RTS )
6062*4882a593Smuzhiyun Control &= ~(BIT6);
6063*4882a593Smuzhiyun else
6064*4882a593Smuzhiyun Control |= BIT6;
6065*4882a593Smuzhiyun
6066*4882a593Smuzhiyun if ( V24Out & SerialSignal_DTR )
6067*4882a593Smuzhiyun Control &= ~(BIT4);
6068*4882a593Smuzhiyun else
6069*4882a593Smuzhiyun Control |= BIT4;
6070*4882a593Smuzhiyun
6071*4882a593Smuzhiyun usc_OutReg( info, PCR, Control );
6072*4882a593Smuzhiyun
6073*4882a593Smuzhiyun } /* end of usc_set_serial_signals() */
6074*4882a593Smuzhiyun
6075*4882a593Smuzhiyun /* usc_enable_async_clock()
6076*4882a593Smuzhiyun *
6077*4882a593Smuzhiyun * Enable the async clock at the specified frequency.
6078*4882a593Smuzhiyun *
6079*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6080*4882a593Smuzhiyun * data_rate data rate of clock in bps
6081*4882a593Smuzhiyun * 0 disables the AUX clock.
6082*4882a593Smuzhiyun * Return Value: None
6083*4882a593Smuzhiyun */
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
	if ( data_rate ) {
		/*
		 * Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );


		/*
		 * Write 16-bit Time Constant for BRG0
		 * Time Constant = (ClkSpeed / data_rate) - 1
		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
		 *
		 * NOTE(review): the divisor below is hard-coded to the PCI
		 * clock (691200) regardless of bus type -- confirm whether
		 * ISA cards need 921600 here.
		 */

		usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );

		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR,
			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );


		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */

		usc_OutReg( info, IOCR,
			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

} /* end of usc_enable_async_clock() */
6132*4882a593Smuzhiyun
6133*4882a593Smuzhiyun /*
6134*4882a593Smuzhiyun * Buffer Structures:
6135*4882a593Smuzhiyun *
6136*4882a593Smuzhiyun * Normal memory access uses virtual addresses that can make discontiguous
6137*4882a593Smuzhiyun * physical memory pages appear to be contiguous in the virtual address
 * space (the processor's memory mapping handles the conversions).
6139*4882a593Smuzhiyun *
6140*4882a593Smuzhiyun * DMA transfers require physically contiguous memory. This is because
6141*4882a593Smuzhiyun * the DMA system controller and DMA bus masters deal with memory using
6142*4882a593Smuzhiyun * only physical addresses.
6143*4882a593Smuzhiyun *
6144*4882a593Smuzhiyun * This causes a problem under Windows NT when large DMA buffers are
6145*4882a593Smuzhiyun * needed. Fragmentation of the nonpaged pool prevents allocations of
6146*4882a593Smuzhiyun * physically contiguous buffers larger than the PAGE_SIZE.
6147*4882a593Smuzhiyun *
6148*4882a593Smuzhiyun * However the 16C32 supports Bus Master Scatter/Gather DMA which
6149*4882a593Smuzhiyun * allows DMA transfers to physically discontiguous buffers. Information
6150*4882a593Smuzhiyun * about each data transfer buffer is contained in a memory structure
6151*4882a593Smuzhiyun * called a 'buffer entry'. A list of buffer entries is maintained
6152*4882a593Smuzhiyun * to track and control the use of the data transfer buffers.
6153*4882a593Smuzhiyun *
6154*4882a593Smuzhiyun * To support this strategy we will allocate sufficient PAGE_SIZE
6155*4882a593Smuzhiyun * contiguous memory buffers to allow for the total required buffer
6156*4882a593Smuzhiyun * space.
6157*4882a593Smuzhiyun *
6158*4882a593Smuzhiyun * The 16C32 accesses the list of buffer entries using Bus Master
6159*4882a593Smuzhiyun * DMA. Control information is read from the buffer entries by the
6160*4882a593Smuzhiyun * 16C32 to control data transfers. status information is written to
6161*4882a593Smuzhiyun * the buffer entries by the 16C32 to indicate the status of completed
6162*4882a593Smuzhiyun * transfers.
6163*4882a593Smuzhiyun *
6164*4882a593Smuzhiyun * The CPU writes control information to the buffer entries to control
6165*4882a593Smuzhiyun * the 16C32 and reads status information from the buffer entries to
6166*4882a593Smuzhiyun * determine information about received and transmitted frames.
6167*4882a593Smuzhiyun *
6168*4882a593Smuzhiyun * Because the CPU and 16C32 (adapter) both need simultaneous access
6169*4882a593Smuzhiyun * to the buffer entries, the buffer entry memory is allocated with
6170*4882a593Smuzhiyun * HalAllocateCommonBuffer(). This restricts the size of the buffer
6171*4882a593Smuzhiyun * entry list to PAGE_SIZE.
6172*4882a593Smuzhiyun *
6173*4882a593Smuzhiyun * The actual data buffers on the other hand will only be accessed
6174*4882a593Smuzhiyun * by the CPU or the adapter but not by both simultaneously. This allows
6175*4882a593Smuzhiyun * Scatter/Gather packet based DMA procedures for using physically
6176*4882a593Smuzhiyun * discontiguous pages.
6177*4882a593Smuzhiyun */
6178*4882a593Smuzhiyun
6179*4882a593Smuzhiyun /*
6180*4882a593Smuzhiyun * mgsl_reset_tx_dma_buffers()
6181*4882a593Smuzhiyun *
6182*4882a593Smuzhiyun * Set the count for all transmit buffers to 0 to indicate the
6183*4882a593Smuzhiyun * buffer is available for use and set the current buffer to the
6184*4882a593Smuzhiyun * first buffer. This effectively makes all buffers free and
6185*4882a593Smuzhiyun * discards any data in buffers.
6186*4882a593Smuzhiyun *
6187*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6188*4882a593Smuzhiyun * Return Value: None
6189*4882a593Smuzhiyun */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* Type-punned 32-bit store over the entry's count field;
		 * presumably zeroes count and the adjacent status field in
		 * one write of the shared (CPU + 16C32) buffer entry --
		 * TODO confirm field layout of DMABUFFERENTRY.
		 */
		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
	}

	/* restart buffer bookkeeping at the first buffer */
	info->current_tx_buffer = 0;
	info->start_tx_dma_buffer = 0;
	info->tx_dma_buffers_used = 0;

	/* empty the intermediate tx holding buffer queue as well */
	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

} /* end of mgsl_reset_tx_dma_buffers() */
6207*4882a593Smuzhiyun
6208*4882a593Smuzhiyun /*
6209*4882a593Smuzhiyun * num_free_tx_dma_buffers()
6210*4882a593Smuzhiyun *
6211*4882a593Smuzhiyun * returns the number of free tx dma buffers available
6212*4882a593Smuzhiyun *
6213*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6214*4882a593Smuzhiyun * Return Value: number of free tx dma buffers
6215*4882a593Smuzhiyun */
num_free_tx_dma_buffers(struct mgsl_struct * info)6216*4882a593Smuzhiyun static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6217*4882a593Smuzhiyun {
6218*4882a593Smuzhiyun return info->tx_buffer_count - info->tx_dma_buffers_used;
6219*4882a593Smuzhiyun }
6220*4882a593Smuzhiyun
6221*4882a593Smuzhiyun /*
6222*4882a593Smuzhiyun * mgsl_reset_rx_dma_buffers()
6223*4882a593Smuzhiyun *
6224*4882a593Smuzhiyun * Set the count for all receive buffers to DMABUFFERSIZE
6225*4882a593Smuzhiyun * and set the current buffer to the first buffer. This effectively
6226*4882a593Smuzhiyun * makes all buffers free and discards any data in buffers.
6227*4882a593Smuzhiyun *
6228*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6229*4882a593Smuzhiyun * Return Value: None
6230*4882a593Smuzhiyun */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* Type-punned 32-bit store: restores count to
		 * DMABUFFERSIZE and presumably clears the adjacent status
		 * field in one write (see commented-out per-field form
		 * below) -- TODO confirm DMABUFFERENTRY field layout.
		 */
		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
//		info->rx_buffer_list[i].status = 0;
	}

	/* resume receiving into the first buffer */
	info->current_rx_buffer = 0;

} /* end of mgsl_reset_rx_dma_buffers() */
6244*4882a593Smuzhiyun
6245*4882a593Smuzhiyun /*
6246*4882a593Smuzhiyun * mgsl_free_rx_frame_buffers()
6247*4882a593Smuzhiyun *
6248*4882a593Smuzhiyun * Free the receive buffers used by a received SDLC
6249*4882a593Smuzhiyun * frame such that the buffers can be reused.
6250*4882a593Smuzhiyun *
6251*4882a593Smuzhiyun * Arguments:
6252*4882a593Smuzhiyun *
6253*4882a593Smuzhiyun * info pointer to device instance data
6254*4882a593Smuzhiyun * StartIndex index of 1st receive buffer of frame
6255*4882a593Smuzhiyun * EndIndex index of last receive buffer of frame
6256*4882a593Smuzhiyun *
6257*4882a593Smuzhiyun * Return Value: None
6258*4882a593Smuzhiyun */
mgsl_free_rx_frame_buffers(struct mgsl_struct * info,unsigned int StartIndex,unsigned int EndIndex)6259*4882a593Smuzhiyun static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6260*4882a593Smuzhiyun {
6261*4882a593Smuzhiyun bool Done = false;
6262*4882a593Smuzhiyun DMABUFFERENTRY *pBufEntry;
6263*4882a593Smuzhiyun unsigned int Index;
6264*4882a593Smuzhiyun
6265*4882a593Smuzhiyun /* Starting with 1st buffer entry of the frame clear the status */
6266*4882a593Smuzhiyun /* field and set the count field to DMA Buffer Size. */
6267*4882a593Smuzhiyun
6268*4882a593Smuzhiyun Index = StartIndex;
6269*4882a593Smuzhiyun
6270*4882a593Smuzhiyun while( !Done ) {
6271*4882a593Smuzhiyun pBufEntry = &(info->rx_buffer_list[Index]);
6272*4882a593Smuzhiyun
6273*4882a593Smuzhiyun if ( Index == EndIndex ) {
6274*4882a593Smuzhiyun /* This is the last buffer of the frame! */
6275*4882a593Smuzhiyun Done = true;
6276*4882a593Smuzhiyun }
6277*4882a593Smuzhiyun
6278*4882a593Smuzhiyun /* reset current buffer for reuse */
6279*4882a593Smuzhiyun // pBufEntry->status = 0;
6280*4882a593Smuzhiyun // pBufEntry->count = DMABUFFERSIZE;
6281*4882a593Smuzhiyun *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6282*4882a593Smuzhiyun
6283*4882a593Smuzhiyun /* advance to next buffer entry in linked list */
6284*4882a593Smuzhiyun Index++;
6285*4882a593Smuzhiyun if ( Index == info->rx_buffer_count )
6286*4882a593Smuzhiyun Index = 0;
6287*4882a593Smuzhiyun }
6288*4882a593Smuzhiyun
6289*4882a593Smuzhiyun /* set current buffer to next buffer after last buffer of frame */
6290*4882a593Smuzhiyun info->current_rx_buffer = Index;
6291*4882a593Smuzhiyun
6292*4882a593Smuzhiyun } /* end of free_rx_frame_buffers() */
6293*4882a593Smuzhiyun
6294*4882a593Smuzhiyun /* mgsl_get_rx_frame()
6295*4882a593Smuzhiyun *
6296*4882a593Smuzhiyun * This function attempts to return a received SDLC frame from the
6297*4882a593Smuzhiyun * receive DMA buffers. Only frames received without errors are returned.
6298*4882a593Smuzhiyun *
6299*4882a593Smuzhiyun * Arguments: info pointer to device extension
6300*4882a593Smuzhiyun * Return Value: true if frame returned, otherwise false
6301*4882a593Smuzhiyun */
static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
	unsigned int StartIndex, EndIndex;	/* index of 1st and last buffers of Rx frame */
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	bool ReturnCode = false;
	unsigned long flags;
	struct tty_struct *tty = info->port.tty;
	bool return_frame = false;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. To find the last buffer of the frame look for
	 * a non-zero status field in the buffer entries. (The status
	 * field is set by the 16C32 after completing a receive frame.)
	 */

	StartIndex = EndIndex = info->current_rx_buffer;

	while( !info->rx_buffer_list[EndIndex].status ) {
		/*
		 * If the count field of the buffer entry is non-zero then
		 * this buffer has not been used. (The 16C32 clears the count
		 * field when it starts using the buffer.) If an unused buffer
		 * is encountered then there are no frames available.
		 */

		if ( info->rx_buffer_list[EndIndex].count )
			goto Cleanup;

		/* advance to next buffer entry in linked list */
		EndIndex++;
		if ( EndIndex == info->rx_buffer_count )
			EndIndex = 0;

		/* if entire list searched then no frame available */
		if ( EndIndex == StartIndex ) {
			/* If this occurs then something bad happened,
			 * all buffers have been 'used' but none mark
			 * the end of a frame. Reset buffers and receiver.
			 */

			if ( info->rx_enabled ){
				spin_lock_irqsave(&info->irq_spinlock,flags);
				usc_start_receiver(info);
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
			}
			goto Cleanup;
		}
	}


	/* check status of receive frame */

	status = info->rx_buffer_list[EndIndex].status;

	if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
			RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
		/* count the error; only one error counter is bumped per frame */
		if ( status & RXSTATUS_SHORT_FRAME )
			info->icount.rxshort++;
		else if ( status & RXSTATUS_ABORT )
			info->icount.rxabort++;
		else if ( status & RXSTATUS_OVERRUN )
			info->icount.rxover++;
		else {
			info->icount.rxcrc++;
			/* in CRC-return mode, frames with a bad CRC are still
			 * delivered; a trailing status byte marks the error */
			if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
				return_frame = true;
		}
		framesize = 0;
#if SYNCLINK_GENERIC_HDLC
		{
			/* account the error on the generic HDLC net device */
			info->netdev->stats.rx_errors++;
			info->netdev->stats.rx_frame_errors++;
		}
#endif
	} else
		return_frame = true;

	if ( return_frame ) {
		/* receive frame has no errors, get frame size.
		 * The frame size is the starting value of the RCC (which was
		 * set to 0xffff) minus the ending value of the RCC (decremented
		 * once for each receive character) minus 2 for the 16-bit CRC.
		 */

		framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;

		/* adjust frame size for CRC if any */
		if ( info->params.crc_type == HDLC_CRC_16_CCITT )
			framesize -= 2;
		else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
			framesize -= 4;
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
			__FILE__,__LINE__,info->device_name,status,framesize);

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
			min_t(int, framesize, DMABUFFERSIZE),0);

	if (framesize) {
		/* drop frames too large for the intermediate buffer; in
		 * CRC-return mode one extra byte is needed for the status */
		if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
				((framesize+1) > info->max_frame_size) ) ||
			(framesize > info->max_frame_size) )
			info->icount.rxlong++;
		else {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			int copy_count = framesize;
			int index = StartIndex;
			unsigned char *ptmp = info->intermediate_rxbuffer;

			if ( !(status & RXSTATUS_CRC_ERROR))
				info->icount.rxok++;

			while(copy_count) {
				int partial_count;
				if ( copy_count > DMABUFFERSIZE )
					partial_count = DMABUFFERSIZE;
				else
					partial_count = copy_count;

				pBufEntry = &(info->rx_buffer_list[index]);
				memcpy( ptmp, pBufEntry->virt_addr, partial_count );
				ptmp += partial_count;
				copy_count -= partial_count;

				if ( ++index == info->rx_buffer_count )
					index = 0;
			}

			if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
				/* append a status byte (RX_OK or RX_CRC_ERROR)
				 * after the frame data for the application */
				++framesize;
				*ptmp = (status & RXSTATUS_CRC_ERROR ?
						RX_CRC_ERROR :
						RX_OK);

				if ( debug_level >= DEBUG_LEVEL_DATA )
					printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
						__FILE__,__LINE__,info->device_name,
						*ptmp);
			}

#if SYNCLINK_GENERIC_HDLC
			if (info->netcount)
				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
			else
#endif
				ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}
	}
	/* Free the buffers used by this frame. */
	mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );

	ReturnCode = true;

Cleanup:

	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to be restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[EndIndex].status &&
			info->rx_buffer_list[EndIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

} /* end of mgsl_get_rx_frame() */
6480*4882a593Smuzhiyun
6481*4882a593Smuzhiyun /* mgsl_get_raw_rx_frame()
6482*4882a593Smuzhiyun *
6483*4882a593Smuzhiyun * This function attempts to return a received frame from the
6484*4882a593Smuzhiyun * receive DMA buffers when running in external loop mode. In this mode,
6485*4882a593Smuzhiyun * we will return at most one DMABUFFERSIZE frame to the application.
6486*4882a593Smuzhiyun * The USC receiver is triggering off of DCD going active to start a new
6487*4882a593Smuzhiyun * frame, and DCD going inactive to terminate the frame (similar to
6488*4882a593Smuzhiyun * processing a closing flag character).
6489*4882a593Smuzhiyun *
6490*4882a593Smuzhiyun * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6491*4882a593Smuzhiyun * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6492*4882a593Smuzhiyun * status field and the RCC field will indicate the length of the
6493*4882a593Smuzhiyun * entire received frame. We take this RCC field and get the modulus
6494*4882a593Smuzhiyun * of RCC and DMABUFFERSIZE to determine if number of bytes in the
6495*4882a593Smuzhiyun * last Rx DMA buffer and return that last portion of the frame.
6496*4882a593Smuzhiyun *
6497*4882a593Smuzhiyun * Arguments: info pointer to device extension
6498*4882a593Smuzhiyun * Return Value: true if frame returned, otherwise false
6499*4882a593Smuzhiyun */
static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
	unsigned int CurrentIndex, NextIndex;
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	bool ReturnCode = false;
	unsigned long flags;
	struct tty_struct *tty = info->port.tty;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. The status field is set by the 16C32 after
	 * completing a receive frame. If the status field of this buffer
	 * is zero, either the USC is still filling this buffer or this
	 * is one of a series of buffers making up a received frame.
	 *
	 * If the count field of this buffer is zero, the USC is either
	 * using this buffer or has used this buffer. Look at the count
	 * field of the next buffer. If that next buffer's count is
	 * non-zero, the USC is still actively using the current buffer.
	 * Otherwise, if the next buffer's count field is zero, the
	 * current buffer is complete and the USC is using the next
	 * buffer.
	 */
	CurrentIndex = NextIndex = info->current_rx_buffer;
	++NextIndex;
	if ( NextIndex == info->rx_buffer_count )
		NextIndex = 0;

	if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
		(info->rx_buffer_list[CurrentIndex].count == 0 &&
			info->rx_buffer_list[NextIndex].count == 0)) {
		/*
		 * Either the status field of this dma buffer is non-zero
		 * (indicating the last buffer of a receive frame) or the next
		 * buffer is marked as in use -- implying this buffer is complete
		 * and an intermediate buffer for this received frame.
		 */

		status = info->rx_buffer_list[CurrentIndex].status;

		if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
				RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
			/* count the error; only one counter is bumped per frame */
			if ( status & RXSTATUS_SHORT_FRAME )
				info->icount.rxshort++;
			else if ( status & RXSTATUS_ABORT )
				info->icount.rxabort++;
			else if ( status & RXSTATUS_OVERRUN )
				info->icount.rxover++;
			else
				info->icount.rxcrc++;
			framesize = 0;
		} else {
			/*
			 * A receive frame is available, get frame size and status.
			 *
			 * The frame size is the starting value of the RCC (which was
			 * set to 0xffff) minus the ending value of the RCC (decremented
			 * once for each receive character) minus 2 or 4 for the 16-bit
			 * or 32-bit CRC.
			 *
			 * If the status field is zero, this is an intermediate buffer.
			 * Its size is 4K.
			 *
			 * If the DMA Buffer Entry's Status field is non-zero, the
			 * receive operation completed normally (ie: DCD dropped). The
			 * RCC field is valid and holds the received frame size.
			 * It is possible that the RCC field will be zero on a DMA buffer
			 * entry with a non-zero status. This can occur if the total
			 * frame size (number of bytes between the time DCD goes active
			 * to the time DCD goes inactive) exceeds 65535 bytes. In this
			 * case the 16C32 has underrun on the RCC count and appears to
			 * stop updating this counter to let us know the actual received
			 * frame size. If this happens (non-zero status and zero RCC),
			 * simply return the entire RxDMA Buffer
			 */
			if ( status ) {
				/*
				 * In the event that the final RxDMA Buffer is
				 * terminated with a non-zero status and the RCC
				 * field is zero, we interpret this as the RCC
				 * having underflowed (received frame > 65535 bytes).
				 *
				 * Signal the event to the user by passing back
				 * a status of RxStatus_CrcError returning the full
				 * buffer and let the app figure out what data is
				 * actually valid
				 */
				if ( info->rx_buffer_list[CurrentIndex].rcc )
					framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
				else
					framesize = DMABUFFERSIZE;
			}
			else
				framesize = DMABUFFERSIZE;
		}

		if ( framesize > DMABUFFERSIZE ) {
			/*
			 * if running in raw sync mode, ISR handler for
			 * End Of Buffer events terminates all buffers at 4K.
			 * If this frame size is said to be >4K, get the
			 * actual number of bytes of the frame in this buffer.
			 */
			framesize = framesize % DMABUFFERSIZE;
		}


		if ( debug_level >= DEBUG_LEVEL_BH )
			printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
				__FILE__,__LINE__,info->device_name,status,framesize);

		if ( debug_level >= DEBUG_LEVEL_DATA )
			mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
				min_t(int, framesize, DMABUFFERSIZE),0);

		if (framesize) {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			/* NOTE: we never copy more than DMABUFFERSIZE bytes */

			pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
			memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
			info->icount.rxok++;

			/* hand the chunk directly to the line discipline */
			ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}

		/* Free the buffers used by this frame. */
		mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );

		ReturnCode = true;
	}


	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to be restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[CurrentIndex].status &&
			info->rx_buffer_list[CurrentIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

} /* end of mgsl_get_raw_rx_frame() */
6652*4882a593Smuzhiyun
6653*4882a593Smuzhiyun /* mgsl_load_tx_dma_buffer()
6654*4882a593Smuzhiyun *
6655*4882a593Smuzhiyun * Load the transmit DMA buffer with the specified data.
6656*4882a593Smuzhiyun *
6657*4882a593Smuzhiyun * Arguments:
6658*4882a593Smuzhiyun *
6659*4882a593Smuzhiyun * info pointer to device extension
6660*4882a593Smuzhiyun * Buffer pointer to buffer containing frame to load
6661*4882a593Smuzhiyun * BufferSize size in bytes of frame in Buffer
6662*4882a593Smuzhiyun *
6663*4882a593Smuzhiyun * Return Value: None
6664*4882a593Smuzhiyun */
mgsl_load_tx_dma_buffer(struct mgsl_struct * info,const char * Buffer,unsigned int BufferSize)6665*4882a593Smuzhiyun static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6666*4882a593Smuzhiyun const char *Buffer, unsigned int BufferSize)
6667*4882a593Smuzhiyun {
6668*4882a593Smuzhiyun unsigned short Copycount;
6669*4882a593Smuzhiyun unsigned int i = 0;
6670*4882a593Smuzhiyun DMABUFFERENTRY *pBufEntry;
6671*4882a593Smuzhiyun
6672*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_DATA )
6673*4882a593Smuzhiyun mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6674*4882a593Smuzhiyun
6675*4882a593Smuzhiyun if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6676*4882a593Smuzhiyun /* set CMR:13 to start transmit when
6677*4882a593Smuzhiyun * next GoAhead (abort) is received
6678*4882a593Smuzhiyun */
6679*4882a593Smuzhiyun info->cmr_value |= BIT13;
6680*4882a593Smuzhiyun }
6681*4882a593Smuzhiyun
6682*4882a593Smuzhiyun /* begin loading the frame in the next available tx dma
6683*4882a593Smuzhiyun * buffer, remember it's starting location for setting
6684*4882a593Smuzhiyun * up tx dma operation
6685*4882a593Smuzhiyun */
6686*4882a593Smuzhiyun i = info->current_tx_buffer;
6687*4882a593Smuzhiyun info->start_tx_dma_buffer = i;
6688*4882a593Smuzhiyun
6689*4882a593Smuzhiyun /* Setup the status and RCC (Frame Size) fields of the 1st */
6690*4882a593Smuzhiyun /* buffer entry in the transmit DMA buffer list. */
6691*4882a593Smuzhiyun
6692*4882a593Smuzhiyun info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6693*4882a593Smuzhiyun info->tx_buffer_list[i].rcc = BufferSize;
6694*4882a593Smuzhiyun info->tx_buffer_list[i].count = BufferSize;
6695*4882a593Smuzhiyun
6696*4882a593Smuzhiyun /* Copy frame data from 1st source buffer to the DMA buffers. */
6697*4882a593Smuzhiyun /* The frame data may span multiple DMA buffers. */
6698*4882a593Smuzhiyun
6699*4882a593Smuzhiyun while( BufferSize ){
6700*4882a593Smuzhiyun /* Get a pointer to next DMA buffer entry. */
6701*4882a593Smuzhiyun pBufEntry = &info->tx_buffer_list[i++];
6702*4882a593Smuzhiyun
6703*4882a593Smuzhiyun if ( i == info->tx_buffer_count )
6704*4882a593Smuzhiyun i=0;
6705*4882a593Smuzhiyun
6706*4882a593Smuzhiyun /* Calculate the number of bytes that can be copied from */
6707*4882a593Smuzhiyun /* the source buffer to this DMA buffer. */
6708*4882a593Smuzhiyun if ( BufferSize > DMABUFFERSIZE )
6709*4882a593Smuzhiyun Copycount = DMABUFFERSIZE;
6710*4882a593Smuzhiyun else
6711*4882a593Smuzhiyun Copycount = BufferSize;
6712*4882a593Smuzhiyun
6713*4882a593Smuzhiyun /* Actually copy data from source buffer to DMA buffer. */
6714*4882a593Smuzhiyun /* Also set the data count for this individual DMA buffer. */
6715*4882a593Smuzhiyun mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6716*4882a593Smuzhiyun
6717*4882a593Smuzhiyun pBufEntry->count = Copycount;
6718*4882a593Smuzhiyun
6719*4882a593Smuzhiyun /* Advance source pointer and reduce remaining data count. */
6720*4882a593Smuzhiyun Buffer += Copycount;
6721*4882a593Smuzhiyun BufferSize -= Copycount;
6722*4882a593Smuzhiyun
6723*4882a593Smuzhiyun ++info->tx_dma_buffers_used;
6724*4882a593Smuzhiyun }
6725*4882a593Smuzhiyun
6726*4882a593Smuzhiyun /* remember next available tx dma buffer */
6727*4882a593Smuzhiyun info->current_tx_buffer = i;
6728*4882a593Smuzhiyun
6729*4882a593Smuzhiyun } /* end of mgsl_load_tx_dma_buffer() */
6730*4882a593Smuzhiyun
6731*4882a593Smuzhiyun /*
6732*4882a593Smuzhiyun * mgsl_register_test()
6733*4882a593Smuzhiyun *
6734*4882a593Smuzhiyun * Performs a register test of the 16C32.
6735*4882a593Smuzhiyun *
6736*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6737*4882a593Smuzhiyun * Return Value: true if test passed, otherwise false
6738*4882a593Smuzhiyun */
mgsl_register_test(struct mgsl_struct * info)6739*4882a593Smuzhiyun static bool mgsl_register_test( struct mgsl_struct *info )
6740*4882a593Smuzhiyun {
6741*4882a593Smuzhiyun static unsigned short BitPatterns[] =
6742*4882a593Smuzhiyun { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6743*4882a593Smuzhiyun static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6744*4882a593Smuzhiyun unsigned int i;
6745*4882a593Smuzhiyun bool rc = true;
6746*4882a593Smuzhiyun unsigned long flags;
6747*4882a593Smuzhiyun
6748*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6749*4882a593Smuzhiyun usc_reset(info);
6750*4882a593Smuzhiyun
6751*4882a593Smuzhiyun /* Verify the reset state of some registers. */
6752*4882a593Smuzhiyun
6753*4882a593Smuzhiyun if ( (usc_InReg( info, SICR ) != 0) ||
6754*4882a593Smuzhiyun (usc_InReg( info, IVR ) != 0) ||
6755*4882a593Smuzhiyun (usc_InDmaReg( info, DIVR ) != 0) ){
6756*4882a593Smuzhiyun rc = false;
6757*4882a593Smuzhiyun }
6758*4882a593Smuzhiyun
6759*4882a593Smuzhiyun if ( rc ){
6760*4882a593Smuzhiyun /* Write bit patterns to various registers but do it out of */
6761*4882a593Smuzhiyun /* sync, then read back and verify values. */
6762*4882a593Smuzhiyun
6763*4882a593Smuzhiyun for ( i = 0 ; i < Patterncount ; i++ ) {
6764*4882a593Smuzhiyun usc_OutReg( info, TC0R, BitPatterns[i] );
6765*4882a593Smuzhiyun usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6766*4882a593Smuzhiyun usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6767*4882a593Smuzhiyun usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6768*4882a593Smuzhiyun usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6769*4882a593Smuzhiyun usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6770*4882a593Smuzhiyun
6771*4882a593Smuzhiyun if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6772*4882a593Smuzhiyun (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6773*4882a593Smuzhiyun (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6774*4882a593Smuzhiyun (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6775*4882a593Smuzhiyun (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6776*4882a593Smuzhiyun (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6777*4882a593Smuzhiyun rc = false;
6778*4882a593Smuzhiyun break;
6779*4882a593Smuzhiyun }
6780*4882a593Smuzhiyun }
6781*4882a593Smuzhiyun }
6782*4882a593Smuzhiyun
6783*4882a593Smuzhiyun usc_reset(info);
6784*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
6785*4882a593Smuzhiyun
6786*4882a593Smuzhiyun return rc;
6787*4882a593Smuzhiyun
6788*4882a593Smuzhiyun } /* end of mgsl_register_test() */
6789*4882a593Smuzhiyun
6790*4882a593Smuzhiyun /* mgsl_irq_test() Perform interrupt test of the 16C32.
6791*4882a593Smuzhiyun *
6792*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6793*4882a593Smuzhiyun * Return Value: true if test passed, otherwise false
6794*4882a593Smuzhiyun */
mgsl_irq_test(struct mgsl_struct * info)6795*4882a593Smuzhiyun static bool mgsl_irq_test( struct mgsl_struct *info )
6796*4882a593Smuzhiyun {
6797*4882a593Smuzhiyun unsigned long EndTime;
6798*4882a593Smuzhiyun unsigned long flags;
6799*4882a593Smuzhiyun
6800*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6801*4882a593Smuzhiyun usc_reset(info);
6802*4882a593Smuzhiyun
6803*4882a593Smuzhiyun /*
6804*4882a593Smuzhiyun * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
6805*4882a593Smuzhiyun * The ISR sets irq_occurred to true.
6806*4882a593Smuzhiyun */
6807*4882a593Smuzhiyun
6808*4882a593Smuzhiyun info->irq_occurred = false;
6809*4882a593Smuzhiyun
6810*4882a593Smuzhiyun /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
6811*4882a593Smuzhiyun /* Enable INTEN (Port 6, Bit12) */
6812*4882a593Smuzhiyun /* This connects the IRQ request signal to the ISA bus */
6813*4882a593Smuzhiyun /* on the ISA adapter. This has no effect for the PCI adapter */
6814*4882a593Smuzhiyun usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
6815*4882a593Smuzhiyun
6816*4882a593Smuzhiyun usc_EnableMasterIrqBit(info);
6817*4882a593Smuzhiyun usc_EnableInterrupts(info, IO_PIN);
6818*4882a593Smuzhiyun usc_ClearIrqPendingBits(info, IO_PIN);
6819*4882a593Smuzhiyun
6820*4882a593Smuzhiyun usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
6821*4882a593Smuzhiyun usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
6822*4882a593Smuzhiyun
6823*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
6824*4882a593Smuzhiyun
6825*4882a593Smuzhiyun EndTime=100;
6826*4882a593Smuzhiyun while( EndTime-- && !info->irq_occurred ) {
6827*4882a593Smuzhiyun msleep_interruptible(10);
6828*4882a593Smuzhiyun }
6829*4882a593Smuzhiyun
6830*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6831*4882a593Smuzhiyun usc_reset(info);
6832*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
6833*4882a593Smuzhiyun
6834*4882a593Smuzhiyun return info->irq_occurred;
6835*4882a593Smuzhiyun
6836*4882a593Smuzhiyun } /* end of mgsl_irq_test() */
6837*4882a593Smuzhiyun
6838*4882a593Smuzhiyun /* mgsl_dma_test()
6839*4882a593Smuzhiyun *
6840*4882a593Smuzhiyun * Perform a DMA test of the 16C32. A small frame is
6841*4882a593Smuzhiyun * transmitted via DMA from a transmit buffer to a receive buffer
6842*4882a593Smuzhiyun * using single buffer DMA mode.
6843*4882a593Smuzhiyun *
6844*4882a593Smuzhiyun * Arguments: info pointer to device instance data
6845*4882a593Smuzhiyun * Return Value: true if test passed, otherwise false
6846*4882a593Smuzhiyun */
mgsl_dma_test(struct mgsl_struct * info)6847*4882a593Smuzhiyun static bool mgsl_dma_test( struct mgsl_struct *info )
6848*4882a593Smuzhiyun {
6849*4882a593Smuzhiyun unsigned short FifoLevel;
6850*4882a593Smuzhiyun unsigned long phys_addr;
6851*4882a593Smuzhiyun unsigned int FrameSize;
6852*4882a593Smuzhiyun unsigned int i;
6853*4882a593Smuzhiyun char *TmpPtr;
6854*4882a593Smuzhiyun bool rc = true;
6855*4882a593Smuzhiyun unsigned short status=0;
6856*4882a593Smuzhiyun unsigned long EndTime;
6857*4882a593Smuzhiyun unsigned long flags;
6858*4882a593Smuzhiyun MGSL_PARAMS tmp_params;
6859*4882a593Smuzhiyun
6860*4882a593Smuzhiyun /* save current port options */
6861*4882a593Smuzhiyun memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
6862*4882a593Smuzhiyun /* load default port options */
6863*4882a593Smuzhiyun memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
6864*4882a593Smuzhiyun
6865*4882a593Smuzhiyun #define TESTFRAMESIZE 40
6866*4882a593Smuzhiyun
6867*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6868*4882a593Smuzhiyun
6869*4882a593Smuzhiyun /* setup 16C32 for SDLC DMA transfer mode */
6870*4882a593Smuzhiyun
6871*4882a593Smuzhiyun usc_reset(info);
6872*4882a593Smuzhiyun usc_set_sdlc_mode(info);
6873*4882a593Smuzhiyun usc_enable_loopback(info,1);
6874*4882a593Smuzhiyun
6875*4882a593Smuzhiyun /* Reprogram the RDMR so that the 16C32 does NOT clear the count
6876*4882a593Smuzhiyun * field of the buffer entry after fetching buffer address. This
6877*4882a593Smuzhiyun * way we can detect a DMA failure for a DMA read (which should be
6878*4882a593Smuzhiyun * non-destructive to system memory) before we try and write to
6879*4882a593Smuzhiyun * memory (where a failure could corrupt system memory).
6880*4882a593Smuzhiyun */
6881*4882a593Smuzhiyun
6882*4882a593Smuzhiyun /* Receive DMA mode Register (RDMR)
6883*4882a593Smuzhiyun *
6884*4882a593Smuzhiyun * <15..14> 11 DMA mode = Linked List Buffer mode
6885*4882a593Smuzhiyun * <13> 1 RSBinA/L = store Rx status Block in List entry
6886*4882a593Smuzhiyun * <12> 0 1 = Clear count of List Entry after fetching
6887*4882a593Smuzhiyun * <11..10> 00 Address mode = Increment
6888*4882a593Smuzhiyun * <9> 1 Terminate Buffer on RxBound
6889*4882a593Smuzhiyun * <8> 0 Bus Width = 16bits
6890*4882a593Smuzhiyun * <7..0> ? status Bits (write as 0s)
6891*4882a593Smuzhiyun *
6892*4882a593Smuzhiyun * 1110 0010 0000 0000 = 0xe200
6893*4882a593Smuzhiyun */
6894*4882a593Smuzhiyun
6895*4882a593Smuzhiyun usc_OutDmaReg( info, RDMR, 0xe200 );
6896*4882a593Smuzhiyun
6897*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
6898*4882a593Smuzhiyun
6899*4882a593Smuzhiyun
6900*4882a593Smuzhiyun /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
6901*4882a593Smuzhiyun
6902*4882a593Smuzhiyun FrameSize = TESTFRAMESIZE;
6903*4882a593Smuzhiyun
6904*4882a593Smuzhiyun /* setup 1st transmit buffer entry: */
6905*4882a593Smuzhiyun /* with frame size and transmit control word */
6906*4882a593Smuzhiyun
6907*4882a593Smuzhiyun info->tx_buffer_list[0].count = FrameSize;
6908*4882a593Smuzhiyun info->tx_buffer_list[0].rcc = FrameSize;
6909*4882a593Smuzhiyun info->tx_buffer_list[0].status = 0x4000;
6910*4882a593Smuzhiyun
6911*4882a593Smuzhiyun /* build a transmit frame in 1st transmit DMA buffer */
6912*4882a593Smuzhiyun
6913*4882a593Smuzhiyun TmpPtr = info->tx_buffer_list[0].virt_addr;
6914*4882a593Smuzhiyun for (i = 0; i < FrameSize; i++ )
6915*4882a593Smuzhiyun *TmpPtr++ = i;
6916*4882a593Smuzhiyun
6917*4882a593Smuzhiyun /* setup 1st receive buffer entry: */
6918*4882a593Smuzhiyun /* clear status, set max receive buffer size */
6919*4882a593Smuzhiyun
6920*4882a593Smuzhiyun info->rx_buffer_list[0].status = 0;
6921*4882a593Smuzhiyun info->rx_buffer_list[0].count = FrameSize + 4;
6922*4882a593Smuzhiyun
6923*4882a593Smuzhiyun /* zero out the 1st receive buffer */
6924*4882a593Smuzhiyun
6925*4882a593Smuzhiyun memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
6926*4882a593Smuzhiyun
6927*4882a593Smuzhiyun /* Set count field of next buffer entries to prevent */
6928*4882a593Smuzhiyun /* 16C32 from using buffers after the 1st one. */
6929*4882a593Smuzhiyun
6930*4882a593Smuzhiyun info->tx_buffer_list[1].count = 0;
6931*4882a593Smuzhiyun info->rx_buffer_list[1].count = 0;
6932*4882a593Smuzhiyun
6933*4882a593Smuzhiyun
6934*4882a593Smuzhiyun /***************************/
6935*4882a593Smuzhiyun /* Program 16C32 receiver. */
6936*4882a593Smuzhiyun /***************************/
6937*4882a593Smuzhiyun
6938*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6939*4882a593Smuzhiyun
6940*4882a593Smuzhiyun /* setup DMA transfers */
6941*4882a593Smuzhiyun usc_RTCmd( info, RTCmd_PurgeRxFifo );
6942*4882a593Smuzhiyun
6943*4882a593Smuzhiyun /* program 16C32 receiver with physical address of 1st DMA buffer entry */
6944*4882a593Smuzhiyun phys_addr = info->rx_buffer_list[0].phys_entry;
6945*4882a593Smuzhiyun usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
6946*4882a593Smuzhiyun usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
6947*4882a593Smuzhiyun
6948*4882a593Smuzhiyun /* Clear the Rx DMA status bits (read RDMR) and start channel */
6949*4882a593Smuzhiyun usc_InDmaReg( info, RDMR );
6950*4882a593Smuzhiyun usc_DmaCmd( info, DmaCmd_InitRxChannel );
6951*4882a593Smuzhiyun
6952*4882a593Smuzhiyun /* Enable Receiver (RMR <1..0> = 10) */
6953*4882a593Smuzhiyun usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
6954*4882a593Smuzhiyun
6955*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
6956*4882a593Smuzhiyun
6957*4882a593Smuzhiyun
6958*4882a593Smuzhiyun /*************************************************************/
6959*4882a593Smuzhiyun /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
6960*4882a593Smuzhiyun /*************************************************************/
6961*4882a593Smuzhiyun
6962*4882a593Smuzhiyun /* Wait 100ms for interrupt. */
6963*4882a593Smuzhiyun EndTime = jiffies + msecs_to_jiffies(100);
6964*4882a593Smuzhiyun
6965*4882a593Smuzhiyun for(;;) {
6966*4882a593Smuzhiyun if (time_after(jiffies, EndTime)) {
6967*4882a593Smuzhiyun rc = false;
6968*4882a593Smuzhiyun break;
6969*4882a593Smuzhiyun }
6970*4882a593Smuzhiyun
6971*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6972*4882a593Smuzhiyun status = usc_InDmaReg( info, RDMR );
6973*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
6974*4882a593Smuzhiyun
6975*4882a593Smuzhiyun if ( !(status & BIT4) && (status & BIT5) ) {
6976*4882a593Smuzhiyun /* INITG (BIT 4) is inactive (no entry read in progress) AND */
6977*4882a593Smuzhiyun /* BUSY (BIT 5) is active (channel still active). */
6978*4882a593Smuzhiyun /* This means the buffer entry read has completed. */
6979*4882a593Smuzhiyun break;
6980*4882a593Smuzhiyun }
6981*4882a593Smuzhiyun }
6982*4882a593Smuzhiyun
6983*4882a593Smuzhiyun
6984*4882a593Smuzhiyun /******************************/
6985*4882a593Smuzhiyun /* Program 16C32 transmitter. */
6986*4882a593Smuzhiyun /******************************/
6987*4882a593Smuzhiyun
6988*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
6989*4882a593Smuzhiyun
6990*4882a593Smuzhiyun /* Program the Transmit Character Length Register (TCLR) */
6991*4882a593Smuzhiyun /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6992*4882a593Smuzhiyun
6993*4882a593Smuzhiyun usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
6994*4882a593Smuzhiyun usc_RTCmd( info, RTCmd_PurgeTxFifo );
6995*4882a593Smuzhiyun
6996*4882a593Smuzhiyun /* Program the address of the 1st DMA Buffer Entry in linked list */
6997*4882a593Smuzhiyun
6998*4882a593Smuzhiyun phys_addr = info->tx_buffer_list[0].phys_entry;
6999*4882a593Smuzhiyun usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7000*4882a593Smuzhiyun usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7001*4882a593Smuzhiyun
7002*4882a593Smuzhiyun /* unlatch Tx status bits, and start transmit channel. */
7003*4882a593Smuzhiyun
7004*4882a593Smuzhiyun usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7005*4882a593Smuzhiyun usc_DmaCmd( info, DmaCmd_InitTxChannel );
7006*4882a593Smuzhiyun
7007*4882a593Smuzhiyun /* wait for DMA controller to fill transmit FIFO */
7008*4882a593Smuzhiyun
7009*4882a593Smuzhiyun usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7010*4882a593Smuzhiyun
7011*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7012*4882a593Smuzhiyun
7013*4882a593Smuzhiyun
7014*4882a593Smuzhiyun /**********************************/
7015*4882a593Smuzhiyun /* WAIT FOR TRANSMIT FIFO TO FILL */
7016*4882a593Smuzhiyun /**********************************/
7017*4882a593Smuzhiyun
7018*4882a593Smuzhiyun /* Wait 100ms */
7019*4882a593Smuzhiyun EndTime = jiffies + msecs_to_jiffies(100);
7020*4882a593Smuzhiyun
7021*4882a593Smuzhiyun for(;;) {
7022*4882a593Smuzhiyun if (time_after(jiffies, EndTime)) {
7023*4882a593Smuzhiyun rc = false;
7024*4882a593Smuzhiyun break;
7025*4882a593Smuzhiyun }
7026*4882a593Smuzhiyun
7027*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7028*4882a593Smuzhiyun FifoLevel = usc_InReg(info, TICR) >> 8;
7029*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7030*4882a593Smuzhiyun
7031*4882a593Smuzhiyun if ( FifoLevel < 16 )
7032*4882a593Smuzhiyun break;
7033*4882a593Smuzhiyun else
7034*4882a593Smuzhiyun if ( FrameSize < 32 ) {
7035*4882a593Smuzhiyun /* This frame is smaller than the entire transmit FIFO */
7036*4882a593Smuzhiyun /* so wait for the entire frame to be loaded. */
7037*4882a593Smuzhiyun if ( FifoLevel <= (32 - FrameSize) )
7038*4882a593Smuzhiyun break;
7039*4882a593Smuzhiyun }
7040*4882a593Smuzhiyun }
7041*4882a593Smuzhiyun
7042*4882a593Smuzhiyun
7043*4882a593Smuzhiyun if ( rc )
7044*4882a593Smuzhiyun {
7045*4882a593Smuzhiyun /* Enable 16C32 transmitter. */
7046*4882a593Smuzhiyun
7047*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7048*4882a593Smuzhiyun
7049*4882a593Smuzhiyun /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7050*4882a593Smuzhiyun usc_TCmd( info, TCmd_SendFrame );
7051*4882a593Smuzhiyun usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7052*4882a593Smuzhiyun
7053*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7054*4882a593Smuzhiyun
7055*4882a593Smuzhiyun
7056*4882a593Smuzhiyun /******************************/
7057*4882a593Smuzhiyun /* WAIT FOR TRANSMIT COMPLETE */
7058*4882a593Smuzhiyun /******************************/
7059*4882a593Smuzhiyun
7060*4882a593Smuzhiyun /* Wait 100ms */
7061*4882a593Smuzhiyun EndTime = jiffies + msecs_to_jiffies(100);
7062*4882a593Smuzhiyun
7063*4882a593Smuzhiyun /* While timer not expired wait for transmit complete */
7064*4882a593Smuzhiyun
7065*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7066*4882a593Smuzhiyun status = usc_InReg( info, TCSR );
7067*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7068*4882a593Smuzhiyun
7069*4882a593Smuzhiyun while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
7070*4882a593Smuzhiyun if (time_after(jiffies, EndTime)) {
7071*4882a593Smuzhiyun rc = false;
7072*4882a593Smuzhiyun break;
7073*4882a593Smuzhiyun }
7074*4882a593Smuzhiyun
7075*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7076*4882a593Smuzhiyun status = usc_InReg( info, TCSR );
7077*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7078*4882a593Smuzhiyun }
7079*4882a593Smuzhiyun }
7080*4882a593Smuzhiyun
7081*4882a593Smuzhiyun
7082*4882a593Smuzhiyun if ( rc ){
7083*4882a593Smuzhiyun /* CHECK FOR TRANSMIT ERRORS */
7084*4882a593Smuzhiyun if ( status & (BIT5 | BIT1) )
7085*4882a593Smuzhiyun rc = false;
7086*4882a593Smuzhiyun }
7087*4882a593Smuzhiyun
7088*4882a593Smuzhiyun if ( rc ) {
7089*4882a593Smuzhiyun /* WAIT FOR RECEIVE COMPLETE */
7090*4882a593Smuzhiyun
7091*4882a593Smuzhiyun /* Wait 100ms */
7092*4882a593Smuzhiyun EndTime = jiffies + msecs_to_jiffies(100);
7093*4882a593Smuzhiyun
7094*4882a593Smuzhiyun /* Wait for 16C32 to write receive status to buffer entry. */
7095*4882a593Smuzhiyun status=info->rx_buffer_list[0].status;
7096*4882a593Smuzhiyun while ( status == 0 ) {
7097*4882a593Smuzhiyun if (time_after(jiffies, EndTime)) {
7098*4882a593Smuzhiyun rc = false;
7099*4882a593Smuzhiyun break;
7100*4882a593Smuzhiyun }
7101*4882a593Smuzhiyun status=info->rx_buffer_list[0].status;
7102*4882a593Smuzhiyun }
7103*4882a593Smuzhiyun }
7104*4882a593Smuzhiyun
7105*4882a593Smuzhiyun
7106*4882a593Smuzhiyun if ( rc ) {
7107*4882a593Smuzhiyun /* CHECK FOR RECEIVE ERRORS */
7108*4882a593Smuzhiyun status = info->rx_buffer_list[0].status;
7109*4882a593Smuzhiyun
7110*4882a593Smuzhiyun if ( status & (BIT8 | BIT3 | BIT1) ) {
7111*4882a593Smuzhiyun /* receive error has occurred */
7112*4882a593Smuzhiyun rc = false;
7113*4882a593Smuzhiyun } else {
7114*4882a593Smuzhiyun if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7115*4882a593Smuzhiyun info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7116*4882a593Smuzhiyun rc = false;
7117*4882a593Smuzhiyun }
7118*4882a593Smuzhiyun }
7119*4882a593Smuzhiyun }
7120*4882a593Smuzhiyun
7121*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7122*4882a593Smuzhiyun usc_reset( info );
7123*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7124*4882a593Smuzhiyun
7125*4882a593Smuzhiyun /* restore current port options */
7126*4882a593Smuzhiyun memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7127*4882a593Smuzhiyun
7128*4882a593Smuzhiyun return rc;
7129*4882a593Smuzhiyun
7130*4882a593Smuzhiyun } /* end of mgsl_dma_test() */
7131*4882a593Smuzhiyun
7132*4882a593Smuzhiyun /* mgsl_adapter_test()
7133*4882a593Smuzhiyun *
7134*4882a593Smuzhiyun * Perform the register, IRQ, and DMA tests for the 16C32.
7135*4882a593Smuzhiyun *
7136*4882a593Smuzhiyun * Arguments: info pointer to device instance data
7137*4882a593Smuzhiyun * Return Value: 0 if success, otherwise -ENODEV
7138*4882a593Smuzhiyun */
mgsl_adapter_test(struct mgsl_struct * info)7139*4882a593Smuzhiyun static int mgsl_adapter_test( struct mgsl_struct *info )
7140*4882a593Smuzhiyun {
7141*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
7142*4882a593Smuzhiyun printk( "%s(%d):Testing device %s\n",
7143*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name );
7144*4882a593Smuzhiyun
7145*4882a593Smuzhiyun if ( !mgsl_register_test( info ) ) {
7146*4882a593Smuzhiyun info->init_error = DiagStatus_AddressFailure;
7147*4882a593Smuzhiyun printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7148*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7149*4882a593Smuzhiyun return -ENODEV;
7150*4882a593Smuzhiyun }
7151*4882a593Smuzhiyun
7152*4882a593Smuzhiyun if ( !mgsl_irq_test( info ) ) {
7153*4882a593Smuzhiyun info->init_error = DiagStatus_IrqFailure;
7154*4882a593Smuzhiyun printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7155*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7156*4882a593Smuzhiyun return -ENODEV;
7157*4882a593Smuzhiyun }
7158*4882a593Smuzhiyun
7159*4882a593Smuzhiyun if ( !mgsl_dma_test( info ) ) {
7160*4882a593Smuzhiyun info->init_error = DiagStatus_DmaFailure;
7161*4882a593Smuzhiyun printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7162*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7163*4882a593Smuzhiyun return -ENODEV;
7164*4882a593Smuzhiyun }
7165*4882a593Smuzhiyun
7166*4882a593Smuzhiyun if ( debug_level >= DEBUG_LEVEL_INFO )
7167*4882a593Smuzhiyun printk( "%s(%d):device %s passed diagnostics\n",
7168*4882a593Smuzhiyun __FILE__,__LINE__,info->device_name );
7169*4882a593Smuzhiyun
7170*4882a593Smuzhiyun return 0;
7171*4882a593Smuzhiyun
7172*4882a593Smuzhiyun } /* end of mgsl_adapter_test() */
7173*4882a593Smuzhiyun
7174*4882a593Smuzhiyun /* mgsl_memory_test()
7175*4882a593Smuzhiyun *
7176*4882a593Smuzhiyun * Test the shared memory on a PCI adapter.
7177*4882a593Smuzhiyun *
7178*4882a593Smuzhiyun * Arguments: info pointer to device instance data
7179*4882a593Smuzhiyun * Return Value: true if test passed, otherwise false
7180*4882a593Smuzhiyun */
static bool mgsl_memory_test( struct mgsl_struct *info )
{
	/* Walking-pattern test of the PCI adapter's shared memory window.
	 *
	 * Phase 1 checks the data lines by writing and reading back a set
	 * of bit patterns at a single location.  Phase 2 checks the
	 * address lines by filling the entire window with a value derived
	 * from each location's byte offset, then verifying it.  On success
	 * the window is left zeroed for subsequent users.
	 *
	 * Returns true if every location verifies, otherwise false.
	 *
	 * NOTE(review): memory_base is dereferenced through a plain
	 * (non-volatile) pointer; this assumes the compiler does not
	 * collapse the write/read-back pairs -- confirm against how
	 * memory_base is mapped.
	 */
	static unsigned long BitPatterns[] =
		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned long i;
	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
	unsigned long * TestAddr;

	TestAddr = (unsigned long *)info->memory_base;

	/* Test data lines with test pattern at one location. */

	for ( i = 0 ; i < Patterncount ; i++ ) {
		*TestAddr = BitPatterns[i];
		if ( *TestAddr != BitPatterns[i] )
			return false;
	}

	/* Test address lines with incrementing pattern over */
	/* entire address range. */

	for ( i = 0 ; i < TestLimit ; i++ ) {
		*TestAddr = i * 4;	/* stored value encodes the byte offset */
		TestAddr++;
	}

	TestAddr = (unsigned long *)info->memory_base;

	for ( i = 0 ; i < TestLimit ; i++ ) {
		if ( *TestAddr != i * 4 )
			return false;
		TestAddr++;
	}

	/* leave the shared memory window zeroed */
	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );

	return true;

} /* End Of mgsl_memory_test() */
7221*4882a593Smuzhiyun
7222*4882a593Smuzhiyun
7223*4882a593Smuzhiyun /* mgsl_load_pci_memory()
7224*4882a593Smuzhiyun *
7225*4882a593Smuzhiyun * Load a large block of data into the PCI shared memory.
7226*4882a593Smuzhiyun * Use this instead of memcpy() or memmove() to move data
7227*4882a593Smuzhiyun * into the PCI shared memory.
7228*4882a593Smuzhiyun *
7229*4882a593Smuzhiyun * Notes:
7230*4882a593Smuzhiyun *
7231*4882a593Smuzhiyun * This function prevents the PCI9050 interface chip from hogging
7232*4882a593Smuzhiyun * the adapter local bus, which can starve the 16C32 by preventing
7233*4882a593Smuzhiyun * 16C32 bus master cycles.
7234*4882a593Smuzhiyun *
7235*4882a593Smuzhiyun * The PCI9050 documentation says that the 9050 will always release
7236*4882a593Smuzhiyun * control of the local bus after completing the current read
7237*4882a593Smuzhiyun * or write operation.
7238*4882a593Smuzhiyun *
7239*4882a593Smuzhiyun * It appears that as long as the PCI9050 write FIFO is full, the
7240*4882a593Smuzhiyun * PCI9050 treats all of the writes as a single burst transaction
7241*4882a593Smuzhiyun * and will not release the bus. This causes DMA latency problems
7242*4882a593Smuzhiyun * at high speeds when copying large data blocks to the shared
7243*4882a593Smuzhiyun * memory.
7244*4882a593Smuzhiyun *
7245*4882a593Smuzhiyun * This function in effect, breaks the a large shared memory write
7246*4882a593Smuzhiyun * into multiple transations by interleaving a shared memory read
7247*4882a593Smuzhiyun * which will flush the write FIFO and 'complete' the write
7248*4882a593Smuzhiyun * transation. This allows any pending DMA request to gain control
7249*4882a593Smuzhiyun * of the local bus in a timely fasion.
7250*4882a593Smuzhiyun *
7251*4882a593Smuzhiyun * Arguments:
7252*4882a593Smuzhiyun *
7253*4882a593Smuzhiyun * TargetPtr pointer to target address in PCI shared memory
7254*4882a593Smuzhiyun * SourcePtr pointer to source buffer for data
7255*4882a593Smuzhiyun * count count in bytes of data to copy
7256*4882a593Smuzhiyun *
7257*4882a593Smuzhiyun * Return Value: None
7258*4882a593Smuzhiyun */
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
	unsigned short count )
{
	/* Copy count bytes into PCI shared memory in small chunks,
	 * reading back one long word after each chunk.  The read flushes
	 * the PCI9050 write FIFO, ending the write burst so a pending
	 * 16C32 DMA request can win the local bus between chunks (see
	 * the block comment above for the full rationale).
	 */

	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64

	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
	unsigned short Index;
	unsigned long Dummy;

	for ( Index = 0 ; Index < Intervalcount ; Index++ )
	{
		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);

		/* dummy volatile read: forces the 9050 to complete the
		 * preceding burst; the value itself is intentionally unused
		 */
		Dummy = *((volatile unsigned long *)TargetPtr);

		TargetPtr += PCI_LOAD_INTERVAL;
		SourcePtr += PCI_LOAD_INTERVAL;
	}

	/* copy the remaining partial chunk, if any */
	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );

} /* End Of mgsl_load_pci_memory() */
7280*4882a593Smuzhiyun
mgsl_trace_block(struct mgsl_struct * info,const char * data,int count,int xmit)7281*4882a593Smuzhiyun static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7282*4882a593Smuzhiyun {
7283*4882a593Smuzhiyun int i;
7284*4882a593Smuzhiyun int linecount;
7285*4882a593Smuzhiyun if (xmit)
7286*4882a593Smuzhiyun printk("%s tx data:\n",info->device_name);
7287*4882a593Smuzhiyun else
7288*4882a593Smuzhiyun printk("%s rx data:\n",info->device_name);
7289*4882a593Smuzhiyun
7290*4882a593Smuzhiyun while(count) {
7291*4882a593Smuzhiyun if (count > 16)
7292*4882a593Smuzhiyun linecount = 16;
7293*4882a593Smuzhiyun else
7294*4882a593Smuzhiyun linecount = count;
7295*4882a593Smuzhiyun
7296*4882a593Smuzhiyun for(i=0;i<linecount;i++)
7297*4882a593Smuzhiyun printk("%02X ",(unsigned char)data[i]);
7298*4882a593Smuzhiyun for(;i<17;i++)
7299*4882a593Smuzhiyun printk(" ");
7300*4882a593Smuzhiyun for(i=0;i<linecount;i++) {
7301*4882a593Smuzhiyun if (data[i]>=040 && data[i]<=0176)
7302*4882a593Smuzhiyun printk("%c",data[i]);
7303*4882a593Smuzhiyun else
7304*4882a593Smuzhiyun printk(".");
7305*4882a593Smuzhiyun }
7306*4882a593Smuzhiyun printk("\n");
7307*4882a593Smuzhiyun
7308*4882a593Smuzhiyun data += linecount;
7309*4882a593Smuzhiyun count -= linecount;
7310*4882a593Smuzhiyun }
7311*4882a593Smuzhiyun } /* end of mgsl_trace_block() */
7312*4882a593Smuzhiyun
7313*4882a593Smuzhiyun /* mgsl_tx_timeout()
7314*4882a593Smuzhiyun *
7315*4882a593Smuzhiyun * called when HDLC frame times out
7316*4882a593Smuzhiyun * update stats and do tx completion processing
7317*4882a593Smuzhiyun *
7318*4882a593Smuzhiyun * Arguments: context pointer to device instance data
7319*4882a593Smuzhiyun * Return Value: None
7320*4882a593Smuzhiyun */
static void mgsl_tx_timeout(struct timer_list *t)
{
	/* Transmit watchdog expired before the current frame completed.
	 * Count the timeout (HDLC/RAW modes only), abandon the transmit,
	 * and run the normal tx-completion path so the waiting network
	 * or tty layer is released.
	 */
	struct mgsl_struct *info = from_timer(info, t, tx_timer);
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW) ) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->irq_spinlock,flags);
	/* abandon the frame: mark tx idle and discard buffered send data */
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	/* in HDLC loop mode also release the loop (echo RxD to TxD) */
	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* notify whichever layer owns the device (netdev vs tty) */
#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		mgsl_bh_transmit(info);

} /* end of mgsl_tx_timeout() */
7351*4882a593Smuzhiyun
7352*4882a593Smuzhiyun /* signal that there are no more frames to send, so that
7353*4882a593Smuzhiyun * line is 'released' by echoing RxD to TxD when current
7354*4882a593Smuzhiyun * transmission is complete (or immediately if no tx in progress).
7355*4882a593Smuzhiyun */
mgsl_loopmode_send_done(struct mgsl_struct * info)7356*4882a593Smuzhiyun static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7357*4882a593Smuzhiyun {
7358*4882a593Smuzhiyun unsigned long flags;
7359*4882a593Smuzhiyun
7360*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7361*4882a593Smuzhiyun if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7362*4882a593Smuzhiyun if (info->tx_active)
7363*4882a593Smuzhiyun info->loopmode_send_done_requested = true;
7364*4882a593Smuzhiyun else
7365*4882a593Smuzhiyun usc_loopmode_send_done(info);
7366*4882a593Smuzhiyun }
7367*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7368*4882a593Smuzhiyun
7369*4882a593Smuzhiyun return 0;
7370*4882a593Smuzhiyun }
7371*4882a593Smuzhiyun
7372*4882a593Smuzhiyun /* release the line by echoing RxD to TxD
7373*4882a593Smuzhiyun * upon completion of a transmit frame
7374*4882a593Smuzhiyun */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	/* Release the loop: drop any pending release request, then clear
	 * CMR:13 so the 16C32 resumes echoing RxData to TxData.  Both
	 * visible callers hold info->irq_spinlock -- presumably required
	 * for cmr_value consistency; confirm for any new call sites.
	 */
	info->loopmode_send_done_requested = false;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}
7382*4882a593Smuzhiyun
7383*4882a593Smuzhiyun /* abort a transmit in progress while in HDLC LoopMode
7384*4882a593Smuzhiyun */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* Abort an in-progress HDLC LoopMode transmit: flush the tx FIFO,
	 * stop the tx DMA channel, then release the loop so RxD is echoed
	 * to TxD again.  Called with irq_spinlock held (see
	 * mgsl_tx_timeout).
	 */
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}
7392*4882a593Smuzhiyun
7393*4882a593Smuzhiyun /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7394*4882a593Smuzhiyun * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7395*4882a593Smuzhiyun * we must clear CMR:13 to begin repeating TxData to RxData
7396*4882a593Smuzhiyun */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	/* Request insertion of this station into the HDLC/SDLC loop.
	 * Insertion completes later: when a GoAhead sequence (RxAbort)
	 * arrives, the abort handler clears CMR:13, which starts
	 * repeating TxData on RxData.
	 */
	info->loopmode_insert_requested = true;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}
7411*4882a593Smuzhiyun
7412*4882a593Smuzhiyun /* return 1 if station is inserted into the loop, otherwise 0
7413*4882a593Smuzhiyun */
usc_loopmode_active(struct mgsl_struct * info)7414*4882a593Smuzhiyun static int usc_loopmode_active( struct mgsl_struct * info)
7415*4882a593Smuzhiyun {
7416*4882a593Smuzhiyun return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7417*4882a593Smuzhiyun }
7418*4882a593Smuzhiyun
7419*4882a593Smuzhiyun #if SYNCLINK_GENERIC_HDLC
7420*4882a593Smuzhiyun
7421*4882a593Smuzhiyun /**
7422*4882a593Smuzhiyun * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7423*4882a593Smuzhiyun * @dev: pointer to network device structure
7424*4882a593Smuzhiyun * @encoding: serial encoding setting
7425*4882a593Smuzhiyun * @parity: FCS setting
7426*4882a593Smuzhiyun *
7427*4882a593Smuzhiyun * Set encoding and frame check sequence (FCS) options.
7428*4882a593Smuzhiyun *
7429*4882a593Smuzhiyun * Return: 0 if success, otherwise error code
7430*4882a593Smuzhiyun */
hdlcdev_attach(struct net_device * dev,unsigned short encoding,unsigned short parity)7431*4882a593Smuzhiyun static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7432*4882a593Smuzhiyun unsigned short parity)
7433*4882a593Smuzhiyun {
7434*4882a593Smuzhiyun struct mgsl_struct *info = dev_to_port(dev);
7435*4882a593Smuzhiyun unsigned char new_encoding;
7436*4882a593Smuzhiyun unsigned short new_crctype;
7437*4882a593Smuzhiyun
7438*4882a593Smuzhiyun /* return error if TTY interface open */
7439*4882a593Smuzhiyun if (info->port.count)
7440*4882a593Smuzhiyun return -EBUSY;
7441*4882a593Smuzhiyun
7442*4882a593Smuzhiyun switch (encoding)
7443*4882a593Smuzhiyun {
7444*4882a593Smuzhiyun case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7445*4882a593Smuzhiyun case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7446*4882a593Smuzhiyun case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7447*4882a593Smuzhiyun case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7448*4882a593Smuzhiyun case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7449*4882a593Smuzhiyun default: return -EINVAL;
7450*4882a593Smuzhiyun }
7451*4882a593Smuzhiyun
7452*4882a593Smuzhiyun switch (parity)
7453*4882a593Smuzhiyun {
7454*4882a593Smuzhiyun case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7455*4882a593Smuzhiyun case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7456*4882a593Smuzhiyun case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7457*4882a593Smuzhiyun default: return -EINVAL;
7458*4882a593Smuzhiyun }
7459*4882a593Smuzhiyun
7460*4882a593Smuzhiyun info->params.encoding = new_encoding;
7461*4882a593Smuzhiyun info->params.crc_type = new_crctype;
7462*4882a593Smuzhiyun
7463*4882a593Smuzhiyun /* if network interface up, reprogram hardware */
7464*4882a593Smuzhiyun if (info->netcount)
7465*4882a593Smuzhiyun mgsl_program_hw(info);
7466*4882a593Smuzhiyun
7467*4882a593Smuzhiyun return 0;
7468*4882a593Smuzhiyun }
7469*4882a593Smuzhiyun
7470*4882a593Smuzhiyun /**
7471*4882a593Smuzhiyun * hdlcdev_xmit - called by generic HDLC layer to send a frame
7472*4882a593Smuzhiyun * @skb: socket buffer containing HDLC frame
7473*4882a593Smuzhiyun * @dev: pointer to network device structure
7474*4882a593Smuzhiyun */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	/* Transmit one HDLC frame handed down by the network stack.
	 * The device sends a single frame at a time: the queue is stopped
	 * here and restarted by the transmit-done path.  Always consumes
	 * the skb and returns NETDEV_TX_OK.
	 */
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->xmit_cnt = skb->len;
	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	netif_trans_update(dev);

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_active)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return NETDEV_TX_OK;
}
7509*4882a593Smuzhiyun
7510*4882a593Smuzhiyun /**
7511*4882a593Smuzhiyun * hdlcdev_open - called by network layer when interface enabled
7512*4882a593Smuzhiyun * @dev: pointer to network device structure
7513*4882a593Smuzhiyun *
7514*4882a593Smuzhiyun * Claim resources and initialize hardware.
7515*4882a593Smuzhiyun *
7516*4882a593Smuzhiyun * Return: 0 if success, otherwise error code
7517*4882a593Smuzhiyun */
static int hdlcdev_open(struct net_device *dev)
{
	/* Bring up the network interface: run generic HDLC open
	 * processing, claim the port from the tty side (netlock
	 * arbitration), initialize the adapter, and report initial
	 * carrier state from DCD.  netcount is rolled back if adapter
	 * startup fails.
	 */
	struct mgsl_struct *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	rc = hdlc_open(dev);
	if (rc)
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		/* undo the claim taken above so the tty side can open */
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert RTS and DTR, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	mgsl_program_hw(info);

	/* enable network layer transmit */
	netif_trans_update(dev);
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
7568*4882a593Smuzhiyun
/**
 * hdlcdev_close - called by network layer when interface is disabled
 * @dev: pointer to network device structure
 *
 * Shutdown hardware and release resources.
 *
 * Teardown order matters: stop the transmit queue before shutting the
 * adapter down, and clear netcount last so the tty side cannot open
 * the device while hardware teardown is still in progress.
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	/* release the net-vs-tty arbitration claim taken in hdlcdev_open() */
	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}
7598*4882a593Smuzhiyun
/**
 * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
 * @dev: pointer to network device structure
 * @ifr: pointer to network interface request structure
 * @cmd: IOCTL command code
 *
 * Handles SIOCWANDEV get/set of sync_serial_settings (clock source,
 * clock rate, loopback); everything else is delegated to hdlc_ioctl().
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* isolate the rx/tx clock-source bits of the current config */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		/* map driver clock-source flag pairs to generic HDLC clock types */
		memset(&new_line, 0, sizeof(new_line));
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback   = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* map generic HDLC clock type back to driver flag pairs */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		/* replace only the clock-source bits, preserving all other flags */
		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock rate only meaningful when the on-board BRG is the source */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
7701*4882a593Smuzhiyun
7702*4882a593Smuzhiyun /**
7703*4882a593Smuzhiyun * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
7704*4882a593Smuzhiyun *
7705*4882a593Smuzhiyun * @dev: pointer to network device structure
7706*4882a593Smuzhiyun */
hdlcdev_tx_timeout(struct net_device * dev,unsigned int txqueue)7707*4882a593Smuzhiyun static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
7708*4882a593Smuzhiyun {
7709*4882a593Smuzhiyun struct mgsl_struct *info = dev_to_port(dev);
7710*4882a593Smuzhiyun unsigned long flags;
7711*4882a593Smuzhiyun
7712*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
7713*4882a593Smuzhiyun printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7714*4882a593Smuzhiyun
7715*4882a593Smuzhiyun dev->stats.tx_errors++;
7716*4882a593Smuzhiyun dev->stats.tx_aborted_errors++;
7717*4882a593Smuzhiyun
7718*4882a593Smuzhiyun spin_lock_irqsave(&info->irq_spinlock,flags);
7719*4882a593Smuzhiyun usc_stop_transmitter(info);
7720*4882a593Smuzhiyun spin_unlock_irqrestore(&info->irq_spinlock,flags);
7721*4882a593Smuzhiyun
7722*4882a593Smuzhiyun netif_wake_queue(dev);
7723*4882a593Smuzhiyun }
7724*4882a593Smuzhiyun
7725*4882a593Smuzhiyun /**
7726*4882a593Smuzhiyun * hdlcdev_tx_done - called by device driver when transmit completes
7727*4882a593Smuzhiyun * @info: pointer to device instance information
7728*4882a593Smuzhiyun *
7729*4882a593Smuzhiyun * Reenable network layer transmit if stopped.
7730*4882a593Smuzhiyun */
hdlcdev_tx_done(struct mgsl_struct * info)7731*4882a593Smuzhiyun static void hdlcdev_tx_done(struct mgsl_struct *info)
7732*4882a593Smuzhiyun {
7733*4882a593Smuzhiyun if (netif_queue_stopped(info->netdev))
7734*4882a593Smuzhiyun netif_wake_queue(info->netdev);
7735*4882a593Smuzhiyun }
7736*4882a593Smuzhiyun
7737*4882a593Smuzhiyun /**
7738*4882a593Smuzhiyun * hdlcdev_rx - called by device driver when frame received
7739*4882a593Smuzhiyun * @info: pointer to device instance information
7740*4882a593Smuzhiyun * @buf: pointer to buffer contianing frame data
7741*4882a593Smuzhiyun * @size: count of data bytes in buf
7742*4882a593Smuzhiyun *
7743*4882a593Smuzhiyun * Pass frame to network layer.
7744*4882a593Smuzhiyun */
hdlcdev_rx(struct mgsl_struct * info,char * buf,int size)7745*4882a593Smuzhiyun static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7746*4882a593Smuzhiyun {
7747*4882a593Smuzhiyun struct sk_buff *skb = dev_alloc_skb(size);
7748*4882a593Smuzhiyun struct net_device *dev = info->netdev;
7749*4882a593Smuzhiyun
7750*4882a593Smuzhiyun if (debug_level >= DEBUG_LEVEL_INFO)
7751*4882a593Smuzhiyun printk("hdlcdev_rx(%s)\n", dev->name);
7752*4882a593Smuzhiyun
7753*4882a593Smuzhiyun if (skb == NULL) {
7754*4882a593Smuzhiyun printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7755*4882a593Smuzhiyun dev->name);
7756*4882a593Smuzhiyun dev->stats.rx_dropped++;
7757*4882a593Smuzhiyun return;
7758*4882a593Smuzhiyun }
7759*4882a593Smuzhiyun
7760*4882a593Smuzhiyun skb_put_data(skb, buf, size);
7761*4882a593Smuzhiyun
7762*4882a593Smuzhiyun skb->protocol = hdlc_type_trans(skb, dev);
7763*4882a593Smuzhiyun
7764*4882a593Smuzhiyun dev->stats.rx_packets++;
7765*4882a593Smuzhiyun dev->stats.rx_bytes += size;
7766*4882a593Smuzhiyun
7767*4882a593Smuzhiyun netif_rx(skb);
7768*4882a593Smuzhiyun }
7769*4882a593Smuzhiyun
/* network layer callback table for the generic HDLC interface */
static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};
7777*4882a593Smuzhiyun
7778*4882a593Smuzhiyun /**
7779*4882a593Smuzhiyun * hdlcdev_init - called by device driver when adding device instance
7780*4882a593Smuzhiyun * @info: pointer to device instance information
7781*4882a593Smuzhiyun *
7782*4882a593Smuzhiyun * Do generic HDLC initialization.
7783*4882a593Smuzhiyun *
7784*4882a593Smuzhiyun * Return: 0 if success, otherwise error code
7785*4882a593Smuzhiyun */
hdlcdev_init(struct mgsl_struct * info)7786*4882a593Smuzhiyun static int hdlcdev_init(struct mgsl_struct *info)
7787*4882a593Smuzhiyun {
7788*4882a593Smuzhiyun int rc;
7789*4882a593Smuzhiyun struct net_device *dev;
7790*4882a593Smuzhiyun hdlc_device *hdlc;
7791*4882a593Smuzhiyun
7792*4882a593Smuzhiyun /* allocate and initialize network and HDLC layer objects */
7793*4882a593Smuzhiyun
7794*4882a593Smuzhiyun dev = alloc_hdlcdev(info);
7795*4882a593Smuzhiyun if (!dev) {
7796*4882a593Smuzhiyun printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
7797*4882a593Smuzhiyun return -ENOMEM;
7798*4882a593Smuzhiyun }
7799*4882a593Smuzhiyun
7800*4882a593Smuzhiyun /* for network layer reporting purposes only */
7801*4882a593Smuzhiyun dev->base_addr = info->io_base;
7802*4882a593Smuzhiyun dev->irq = info->irq_level;
7803*4882a593Smuzhiyun dev->dma = info->dma_level;
7804*4882a593Smuzhiyun
7805*4882a593Smuzhiyun /* network layer callbacks and settings */
7806*4882a593Smuzhiyun dev->netdev_ops = &hdlcdev_ops;
7807*4882a593Smuzhiyun dev->watchdog_timeo = 10 * HZ;
7808*4882a593Smuzhiyun dev->tx_queue_len = 50;
7809*4882a593Smuzhiyun
7810*4882a593Smuzhiyun /* generic HDLC layer callbacks and settings */
7811*4882a593Smuzhiyun hdlc = dev_to_hdlc(dev);
7812*4882a593Smuzhiyun hdlc->attach = hdlcdev_attach;
7813*4882a593Smuzhiyun hdlc->xmit = hdlcdev_xmit;
7814*4882a593Smuzhiyun
7815*4882a593Smuzhiyun /* register objects with HDLC layer */
7816*4882a593Smuzhiyun rc = register_hdlc_device(dev);
7817*4882a593Smuzhiyun if (rc) {
7818*4882a593Smuzhiyun printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
7819*4882a593Smuzhiyun free_netdev(dev);
7820*4882a593Smuzhiyun return rc;
7821*4882a593Smuzhiyun }
7822*4882a593Smuzhiyun
7823*4882a593Smuzhiyun info->netdev = dev;
7824*4882a593Smuzhiyun return 0;
7825*4882a593Smuzhiyun }
7826*4882a593Smuzhiyun
7827*4882a593Smuzhiyun /**
7828*4882a593Smuzhiyun * hdlcdev_exit - called by device driver when removing device instance
7829*4882a593Smuzhiyun * @info: pointer to device instance information
7830*4882a593Smuzhiyun *
7831*4882a593Smuzhiyun * Do generic HDLC cleanup.
7832*4882a593Smuzhiyun */
hdlcdev_exit(struct mgsl_struct * info)7833*4882a593Smuzhiyun static void hdlcdev_exit(struct mgsl_struct *info)
7834*4882a593Smuzhiyun {
7835*4882a593Smuzhiyun unregister_hdlc_device(info->netdev);
7836*4882a593Smuzhiyun free_netdev(info->netdev);
7837*4882a593Smuzhiyun info->netdev = NULL;
7838*4882a593Smuzhiyun }
7839*4882a593Smuzhiyun
7840*4882a593Smuzhiyun #endif /* CONFIG_HDLC */
7841*4882a593Smuzhiyun
7842*4882a593Smuzhiyun
/*
 * synclink_init_one - PCI probe callback: enable the device, allocate a
 * driver instance, record its resources, and add it to the device list.
 *
 * Return: 0 if success, otherwise -EIO.
 */
static int synclink_init_one (struct pci_dev *dev,
			      const struct pci_device_id *ent)
{
	struct mgsl_struct *info;

	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}

	info = mgsl_allocate_device();
	if (!info) {
		printk("can't allocate device instance data.\n");
		return -EIO;
	}

	/* Copy user configuration info to device instance data */

	info->io_base = pci_resource_start(dev, 2);
	info->irq_level = dev->irq;
	info->phys_memory_base = pci_resource_start(dev, 3);

	/* Because ioremap only works on page boundaries we must map
	 * a larger area than is actually implemented for the LCR
	 * memory range. We map a full page starting at the page boundary.
	 */
	info->phys_lcr_base = pci_resource_start(dev, 0);
	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
	info->phys_lcr_base &= ~(PAGE_SIZE-1);

	info->io_addr_size = 8;
	info->irq_flags = IRQF_SHARED;

	if (dev->device == 0x0210) {
		/* Version 1 PCI9030 based universal PCI adapter */
		info->misc_ctrl_value = 0x007c4080;
		info->hw_version = 1;
	} else {
		/* Version 0 PCI9050 based 5V PCI adapter
		 * A PCI9050 bug prevents reading LCR registers if
		 * LCR base address bit 7 is set. Maintain shadow
		 * value so we can write to LCR misc control reg.
		 */
		info->misc_ctrl_value = 0x087e4546;
		info->hw_version = 0;
	}

	mgsl_add_device(info);

	return 0;
}
7894*4882a593Smuzhiyun
/*
 * synclink_remove_one - PCI remove callback.
 *
 * Intentionally empty: per-device teardown is not performed here; the
 * driver releases its device instances elsewhere (e.g. at module cleanup).
 */
static void synclink_remove_one (struct pci_dev *dev)
{
}
7898*4882a593Smuzhiyun
7899